max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
DQM/EcalMonitorClient/python/TimingClient_cfi.py | ckamtsikis/cmssw | 852 | 12758949 |
import FWCore.ParameterSet.Config as cms
from DQM.EcalMonitorTasks.TimingTask_cfi import ecalTimingTask
from DQM.EcalMonitorClient.IntegrityClient_cfi import ecalIntegrityClient
minChannelEntries = 1
minTowerEntries = 3
toleranceMean = 2.
toleranceRMS = 6.
minChannelEntriesFwd = 8
minTowerEntriesFwd = 24
toleranceMeanFwd = 6.
toleranceRMSFwd = 12.
tailPopulThreshold = 0.4
timeWindow = 25.
ecalTimingClient = cms.untracked.PSet(
params = cms.untracked.PSet(
minChannelEntries = cms.untracked.int32(minChannelEntries),
minTowerEntries = cms.untracked.int32(minTowerEntries),
toleranceMean = cms.untracked.double(toleranceMean),
toleranceRMS = cms.untracked.double(toleranceRMS),
minChannelEntriesFwd = cms.untracked.int32(minChannelEntriesFwd),
minTowerEntriesFwd = cms.untracked.int32(minTowerEntriesFwd),
toleranceMeanFwd = cms.untracked.double(toleranceMeanFwd),
toleranceRMSFwd = cms.untracked.double(toleranceRMSFwd),
tailPopulThreshold = cms.untracked.double(tailPopulThreshold)
),
sources = cms.untracked.PSet(
TimeAllMap = ecalTimingTask.MEs.TimeAllMap,
TimeMap = ecalTimingTask.MEs.TimeMap,
TimeMapByLS = ecalTimingTask.MEs.TimeMapByLS,
ChStatus = ecalIntegrityClient.MEs.ChStatus
),
MEs = cms.untracked.PSet(
RMSAll = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing rms 1D summary'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(10.0),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(0.0),
title = cms.untracked.string('time (ns)')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Distribution of per-channel timing RMS. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
ProjEta = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing projection eta%(suffix)s'),
kind = cms.untracked.string('TProfile'),
yaxis = cms.untracked.PSet(
title = cms.untracked.string('time (ns)')
),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('ProjEta'),
description = cms.untracked.string('Projection of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
FwdBkwdDiff = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingTask/%(prefix)sTMT timing %(prefix)s+ - %(prefix)s-'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal2P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(5.0),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(-5.0),
title = cms.untracked.string('time (ns)')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Forward-backward asymmetry of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
FwdvBkwd = cms.untracked.PSet(
kind = cms.untracked.string('TH2F'),
yaxis = cms.untracked.PSet(
high = cms.untracked.double(timeWindow),
nbins = cms.untracked.int32(50),
low = cms.untracked.double(-timeWindow),
title = cms.untracked.string('time (ns)')
),
otype = cms.untracked.string('Ecal2P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(timeWindow),
nbins = cms.untracked.int32(50),
low = cms.untracked.double(-timeWindow)
),
btype = cms.untracked.string('User'),
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingTask/%(prefix)sTMT timing %(prefix)s+ vs %(prefix)s-'),
description = cms.untracked.string('Forward-backward correlation of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
ProjPhi = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing projection phi%(suffix)s'),
kind = cms.untracked.string('TProfile'),
yaxis = cms.untracked.PSet(
title = cms.untracked.string('time (ns)')
),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('ProjPhi'),
description = cms.untracked.string('Projection of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
MeanSM = cms.untracked.PSet(
kind = cms.untracked.string('TH1F'),
yaxis = cms.untracked.PSet(
title = cms.untracked.string('time (ns)')
),
otype = cms.untracked.string('SM'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(timeWindow),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(-timeWindow)
),
btype = cms.untracked.string('User'),
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing mean %(sm)s'),
description = cms.untracked.string('Distribution of per-channel timing mean. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
RMSMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing rms %(sm)s'),
kind = cms.untracked.string('TH2F'),
zaxis = cms.untracked.PSet(
title = cms.untracked.string('rms (ns)')
),
otype = cms.untracked.string('SM'),
btype = cms.untracked.string('Crystal'),
description = cms.untracked.string('2D distribution of per-channel timing RMS. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
QualitySummary = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing quality summary'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Summary of the timing data quality. A 5x5 tower is red if the mean timing of the tower is off by more than ' + str(toleranceMean) + ' or RMS is greater than ' + str(toleranceRMS) + ' (' + str(toleranceMeanFwd) + ' and ' + str(toleranceRMSFwd) + ' in forward region). Towers with total entries less than ' + str(minTowerEntries) + ' are not subject to this evaluation. Since 5x5 tower timings are calculated with a tighter time-window than per-channel timings, a tower can additionally become red if its the sum of per-channel timing histogram entries is greater than per-tower histogram entries by factor ' + str(1. / (1. - tailPopulThreshold)) + ' (significant fraction of events fall outside the tight time-window).')
),
Quality = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing quality %(sm)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('SM'),
btype = cms.untracked.string('Crystal'),
description = cms.untracked.string('Summary of the timing data quality. A channel is red if its mean timing is off by more than ' + str(toleranceMean) + ' or RMS is greater than ' + str(toleranceRMS) + '. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
MeanAll = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing mean 1D summary'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(timeWindow),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(-timeWindow),
title = cms.untracked.string('time (ns)')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Distribution of per-channel timing mean. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
),
TrendMean = cms.untracked.PSet(
path = cms.untracked.string('Ecal/Trends/TimingClient %(prefix)s timing mean'),
kind = cms.untracked.string('TProfile'),
otype = cms.untracked.string('Ecal2P'),
btype = cms.untracked.string('Trend'),
description = cms.untracked.string('Trend of timing mean. Plots simple average of all channel timing means at each lumisection.')
),
TrendRMS = cms.untracked.PSet(
path = cms.untracked.string('Ecal/Trends/TimingClient %(prefix)s timing rms'),
kind = cms.untracked.string('TProfile'),
otype = cms.untracked.string('Ecal2P'),
btype = cms.untracked.string('Trend'),
description = cms.untracked.string('Trend of timing rms. Plots simple average of all channel timing rms at each lumisection.')
)
)
)
|
test/gallery/auction/testAuction.py | jeanqasaur/jeeves | 253 | 12758983 |
import macropy.activate
import JeevesLib
from smt.Z3 import *
import unittest
from Auction import AuctionContext, Bid, User
import JeevesLib
class TestAuction(unittest.TestCase):
def setUp(self):
JeevesLib.init()
self.aliceUser = User(0)
self.bobUser = User(1)
self.claireUser = User(2)
def testOwnerCanSee(self):
policy = lambda oc: False
aliceBid = Bid(3, self.aliceUser, policy)
ctxt0 = AuctionContext(self.aliceUser, 0, [])
self.assertEqual(3
, JeevesLib.concretize(ctxt0, aliceBid.value))
ctxt1 = AuctionContext(self.bobUser, 0, [])
self.assertEqual(-1
, JeevesLib.concretize(ctxt1, aliceBid.value))
def testTimeSensitiveRelease(self):
auctionEndTime = 10
policy = lambda oc: oc.time > auctionEndTime
aliceBid = Bid(3, self.aliceUser, policy)
self.assertEqual(3
, JeevesLib.concretize(
AuctionContext(self.bobUser, 11, []), aliceBid.value))
self.assertEqual(-1
, JeevesLib.concretize(
AuctionContext(self.bobUser, 10, []), aliceBid.value))
def testSealedAuction(self):
# Function that returns true if the context contains a bid from the given
# user.
def hasBidFromUser(ctxt, u):
return JeevesLib.jhasElt(ctxt.bids, lambda b: b.owner == u)
allUsers = [self.aliceUser, self.bobUser, self.claireUser]
policy = lambda oc: reduce(lambda acc, c: JeevesLib.jand(
lambda: hasBidFromUser(oc, c), lambda: acc)
, allUsers)
aliceBid = Bid(3, self.aliceUser, policy)
bobBid = Bid(4, self.bobUser, policy)
claireBid = Bid(5, self.claireUser, policy)
self.assertEqual(-1,
JeevesLib.concretize(
AuctionContext(self.bobUser, 11, [aliceBid]), aliceBid.value))
if __name__ == '__main__':
unittest.main()
|
client/verta/verta/integrations/__init__.py | fool-sec-review/modeldb | 835 | 12758985 |
"""Automated logging support for common scientific libraries."""
|
leo/external/leoftsindex.py | ATikhonov2/leo-editor | 1,550 | 12759022 |
"""
Stand alone GUI free index builder for Leo's full text search system::
python leoftsindex.py <file1> <file2> <file3>...
If the file name starts with @ it's assumed to be a simple
text file listing files to be indexed.
If <file> does not contain '#' it's assumed to be a .leo file
to index, and is indexed.
If <file> does contain '#' it's assumed to be a .leo file
containing a list of .leo files to index, with the list in
the node indicated by the UNL after the #, e.g.::
path/to/myfile.leo#Lists-->List of outlines
In the latter case, if the node identified by the UNL has children,
the list of files to scan is built from the first line of the body
of each child node of the identified node (works well with bookmarks.py).
If the node identified by the UNL does not have children, the
node's body is assumed to be a simple text listing of paths to .leo files.
.. note::
It may be necessary to quote the "file" on the command line,
as the '#' may be interpreted as a comment delimiter::
python leoftsindex.py "workbook.leo#Links"
"""
import sys
# add folder containing 'leo' folder to path
# sys.path.append("/home/tbrown/Package/leo/bzr/leo.repo/trunk")
import leo.core.leoBridge as leoBridge
import leo.plugins.leofts as leofts
controller = leoBridge.controller(
gui='nullGui',
loadPlugins=False, # True: attempt to load plugins.
readSettings=False, # True: read standard settings files.
silent=False, # True: don't print signon messages.
verbose=False
)
g = controller.globals()
# list of "files" to process
files = sys.argv[1:]
# set up leofts
leofts.set_leo(g)
g._gnxcache = leofts.GnxCache()
fts = leofts.get_fts()
fn2c = {} # cache to avoid loading same outline twice
done = set() # outlines scanned, to avoid repetition
todo = list(files)
while todo:
item = todo.pop(0)
print ("INDEX: %s"%item)
if '#' in item:
fn, node = item.split('#', 1)
else:
fn, node = item, None
if node:
c = fn2c.setdefault(fn, controller.openLeoFile(fn))
found, dummy, p = g.recursiveUNLSearch(node.split('-->'), c)
if not found:
print("Could not find '%s'"%item)
break
if not p:
p = c.p
if p.hasChildren():
# use file named in first node of each child
files = [chl.b.strip().split('\n', 1)[0].strip() for chl in p.children()]
else:
# use all files listed in body
files = [i.strip() for i in p.b.strip().split('\n')]
elif fn.startswith('@'):
todo.extend(open(fn[1:]).read().strip().split('\n'))
files = []
else:
files = [fn]
for fn in files:
# file names may still have '#' if taken from a node list
real_name = fn.split('#', 1)[0]
if real_name in done:
continue
done.add(real_name)
if len(files) != 1:
print (" FILE: %s"%real_name)
c = fn2c.setdefault(real_name, controller.openLeoFile(fn))
fts.drop_document(real_name)
fts.index_nodes(c)
|
bsuite/bsuite/utils/wrappers_test.py | hbutsuak95/iv_rl | 1,337 | 12759058 |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.utils.wrapper."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import environments
from bsuite.environments import catch
from bsuite.utils import wrappers
import dm_env
from dm_env import specs
from dm_env import test_utils
import mock
import numpy as np
class FakeEnvironment(environments.Environment):
"""An environment that returns pre-determined rewards and observations."""
def __init__(self, time_steps):
"""Initializes a new FakeEnvironment.
Args:
time_steps: A sequence of time step namedtuples. This could represent
one episode, or several. This class just repeatedly plays through the
sequence and doesn't inspect the contents.
"""
super().__init__()
self.bsuite_num_episodes = 1000
self._time_steps = time_steps
obs = np.asarray(self._time_steps[0].observation)
self._observation_spec = specs.Array(shape=obs.shape, dtype=obs.dtype)
self._step_index = 0
self._reset_next_step = True
def reset(self):
self._reset_next_step = False
self._step_index = 0
return self._time_steps[0]
def step(self, action):
del action
if self._reset_next_step:
return self.reset()
self._step_index += 1
self._step_index %= len(self._time_steps)
return self._time_steps[self._step_index]
def _reset(self):
raise NotImplementedError
def _step(self, action: int):
raise NotImplementedError
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return specs.Array(shape=(), dtype=np.int32)
def bsuite_info(self):
return {}
class WrapperTest(absltest.TestCase):
def test_wrapper(self):
"""Tests that the wrapper computes and logs the correct data."""
mock_logger = mock.MagicMock()
mock_logger.write = mock.MagicMock()
# Make a fake environment that cycles through these time steps.
timesteps = [
dm_env.restart([]),
dm_env.transition(1, []),
dm_env.transition(2, []),
dm_env.termination(3, []),
]
expected_episode_return = 6
fake_env = FakeEnvironment(timesteps)
env = wrappers.Logging(env=fake_env, logger=mock_logger, log_every=True) # pytype: disable=wrong-arg-types
num_episodes = 5
for _ in range(num_episodes):
timestep = env.reset()
while not timestep.last():
timestep = env.step(action=0)
# We count the number of transitions, hence the -1.
expected_episode_length = len(timesteps) - 1
expected_calls = []
for i in range(1, num_episodes + 1):
expected_calls.append(
mock.call(dict(
steps=expected_episode_length * i,
episode=i,
total_return=expected_episode_return * i,
episode_len=expected_episode_length,
episode_return=expected_episode_return,
))
)
mock_logger.write.assert_has_calls(expected_calls)
def test_unwrap(self):
raw_env = FakeEnvironment([dm_env.restart([])])
scale_env = wrappers.RewardScale(raw_env, reward_scale=1.)
noise_env = wrappers.RewardNoise(scale_env, noise_scale=1.)
logging_env = wrappers.Logging(noise_env, logger=None) # pytype: disable=wrong-arg-types
unwrapped = logging_env.raw_env
self.assertEqual(id(raw_env), id(unwrapped))
class ImageObservationTest(parameterized.TestCase):
@parameterized.parameters(
((84, 84, 4), np.array([1, 2])),
((70, 90), np.array([[1, 0, 2, 3]])),
)
def test_to_image(self, shape, observation):
image = wrappers.to_image(shape, observation)
self.assertEqual(image.shape, shape)
self.assertCountEqual(np.unique(image), np.unique(observation))
class ImageWrapperCatchTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
env = catch.Catch()
return wrappers.ImageObservation(env, (84, 84, 4))
def make_action_sequence(self):
actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(actions)
if __name__ == '__main__':
absltest.main()
|
Security/02 - Terminology and Concepts/02 - Security Key Spaces.py | srgeyK87/Hacker-Rank-30-days-challlenge | 275 | 12759122 |
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/security-key-spaces/problem
# Difficulty: Easy
# Max Score: 10
# Language: Python
# ========================
# Solution
# ========================
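# The cipher shifts each decimal digit of the key forward by e positions,
# wrapping modulo 10 (for example, key 1234 with e = 9 becomes 0123).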
num = (input())
e = int(input())
print(''.join([str((int(i)+e) % 10) for i in num]))
|
src/other/ext/stepcode/misc/wiki-scripts/update-matrix.py | dservin/brlcad | 262 | 12759128 |
#!/usr/bin/env python
#you probably want to run the ctest script that calls this instead:
# ctest -S ctest_matrix.cmake
#must be ran from scl/build_matrix
from __future__ import print_function
from xml.etree import ElementTree as ET
import os
from datetime import date
import subprocess
import codecs
import io
#ctest xml file layout
#<Site ...=...>
# <testing>
# <StartDateTime>..</>
# <StartTestTime>..</>
# <TestList>..</>
# <Test status=..>..</>
# <EndDateTime>Dec 28 17:49 EST</EndDateTime>
# <EndTestTime>1325112579</EndTestTime>
# <ElapsedMinutes>1.9</ElapsedMinutes>
# </Testing>
#</Site>
#summary (aka 's') is a table near the top of the document
#body (aka 'b') contains the details for all schemas
def main():
xml_file = find_xml()
wikipath, matrix = find_wiki()
#codecs.open is deprecated but io.open doesn't seem to work, and open() complains of unicode
out = codecs.open(matrix,encoding='utf-8',mode='w')
out.write( header() )
out.write( read_tests(xml_file) )
out.close()
git_push(wikipath, matrix)
def find_xml():
#find xml file
i = 0
for dirname in os.listdir("Testing"):
if str(date.today().year) in dirname:
i += 1
if i > 1:
print("Too many directories, exiting")
exit(1)
xml = os.path.join("Testing", dirname, "Test.xml")
return xml
def find_wiki():
#find wiki and matrix file, issue 'git pull'
wikipath = os.path.abspath("../../wiki-scl")
if not os.path.isdir(os.path.join(wikipath,".git")):
print("Can't find wiki or not a git repo")
exit(1)
p = subprocess.call(["git", "pull", "origin"], cwd=wikipath)
if not p == 0:
print("'git pull' exited with error")
exit(1)
matrix = os.path.join(wikipath, "Schema-build-matrix.md")
if not os.path.isfile(matrix):
print("Matrix file doesn't exist or isn't a file")
exit(1)
return wikipath,matrix
def git_push(path,f):
p = subprocess.call(["git", "add", f], cwd=path)
if not p == 0:
print("'git add' exited with error")
exit(1)
msg = date.today().__str__() + " - schema matrix updated by update-matrix.py"
p = subprocess.call(["git", "commit", "-m", msg ], cwd=path)
if not p == 0:
print("'git commit' exited with error")
exit(1)
p = subprocess.call(["git", "push", "origin"], cwd=path)
if not p == 0:
print("'git push' exited with error")
exit(1)
def header():
h = "## Created " + date.today().__str__() + "\n" + "### Current as of commit "
l = subprocess.check_output(["git", "log", """--pretty=format:%H Commit Summary: %s<br>Author: %aN<br>Date: %aD""", "-n1"])
h += "[" + l[:8] + "](http://github.com/mpictor/StepClassLibrary/commit/" + l[:l.find(" ")]
h += ") --\n<font color=grey>" + l[l.find(" ")+1:] + "</font>\n\n----\n"
h += "### Summary\n<table width=100%><tr><th>Schema</th><th>Generate</th><th>Build</th></tr>"
return h
def read_tests(xml):
# read all <Test>s in xml, create mixed html/markdown
try:
tree = ET.parse(xml)
except Exception as inst:
print("Unexpected error opening %s: %s" % (xml, inst))
return
root = tree.getroot()
testing = root.find("Testing")
tests = testing.findall("Test")
summary = ""
body = ""
for test in tests:
s,b = schema_info(test,tests)
summary += s
body += b
summary += "</table>\n\n"
return summary + body
def schema_info(test,tests):
# this returns html & markdown formatted summary and body strings
# for the generate and build tests for a single schema
s=""
b=""
name = test.find("Name").text
if name.startswith("generate_cpp_"):
#print this one. if it passes, find and print build.
ap = name[len("generate_cpp_"):]
s += "<tr><td><a href=#" + ap + ">" + ap.title() + "</a></td><td>"
s += test_status(test) + "</td><td>"
b += "----\n<a name=\"wiki-" + ap + "\"></a>\n"
b += "### Schema " + ap.title()
b += "<table width=100%>"
b += test_table("generation",test)
if test.get("Status") == "passed":
for build in tests:
if build.find("Name").text == "build_cpp_sdai_" + ap:
s += test_status(build) + "</td></tr>\n"
b += test_table("compilation",build)
break
else:
s += "----</td></tr>\n"
b += "</table>\n"
return s,b
def test_table(ttype, test):
# populate the table for one test
# returns: html & markdown formatted text to be added to 'body'
b = "<tr><td>Code " + ttype
output = test.find("Results").find("Measurement").find("Value").text
w = output.count("WARNING")
w += output.count("warning")
lines = output.split("\n")
if "The rest of the test output was removed since it exceeds the threshold of" in lines[-2]:
trunc1 = "at least "
trunc2 = "(ctest truncated output)"
else:
trunc1 = ""
trunc2 = ""
if test.get("Status") == "passed":
#print summary in b
b += " succeeded with " + trunc1 + w.__str__() + " warnings " + trunc2
if w == 0: #nothing to print in the table, so skip it
b += "</td></tr>\n"
return b
else:
#print warnings and errors in b
e = output.count("ERROR")
e += output.count("error")
b += " failed with %s%d warnings and %d errors %s" %(trunc1, w, e, trunc2)
b += "<br>\n<table border=1 width=100%>\n"
b += "<tr><th>Line</th><th>Text</th></tr>\n"
# ERRORs
# 242_n2813_mim_lf.exp:2278: --ERROR: Expected a type...
# gcc errors look like ???
l=0
for line in lines:
if ": --ERROR:" in line:
l += 1
c1 = line.find(":")
c2 = line.find(":",c1+1)
b += "<tr><td>" + line[c1+1:c2] + "</td><td>" + line[c2+4:] + "</td></tr>\n"
elif ": error:" in line:
l += 1
c1 = line.find(":")
c2 = line.find(":",c1+1)
c3 = line.find(":",c2+1) #skip the character number
b += "<tr><td>" + line[c1+1:c2] + "</td><td>" + line[c3+2:] + "</td></tr>\n"
if l > 20:
b += "<tr><td>-</td><td><font color=red>-- maximum number of errors printed --</font></td></tr>\n"
break
# WARNINGs
# ap239_arm_lf.exp:2731: WARNING: Implicit downcast...
# WARNING: in SELECT TYPE date_or_date... (multi-line warning)
# compstructs.cc:28:23: warning: unused
l=0
for line in lines:
if ": WARNING" in line:
l += 1
c1 = line.find(":")
c2 = line.find(":",c1+1)
b += "<tr><td>" + line[c1+1:c2] + "</td><td>" + line[c2+2:] + "</td></tr>\n"
elif "WARNING" in line:
b += "<tr><td>????</td><td>" + line + "</td></tr>\n"
elif ": warning:" in line:
l += 1
c1 = line.find(":")
c2 = line.find(":",c1+1)
c3 = line.find(":",c2+1) #skip the character number
b += "<tr><td>" + line[c1+1:c2] + "</td><td>" + line[c3+2:] + "</td></tr>\n"
if l > 20:
b += "<tr><td>-</td><td><font color=red>-- maximum number of warnings printed --</font></td></tr>\n"
break
b += "</table></td></tr>\n"
return b
def test_status(test):
t = ""
for m in test.find("Results").findall("NamedMeasurement"):
if m.get("name") == "Execution Time":
t = " in " + m.find("Value").text + "s"
break
if test.get("Status") == "passed":
s = "<font color=green>PASS</font>"
elif test.get("Status") == "failed":
s = "<font color=red>FAIL</font>"
else:
s = "<font color=cyan>" + test.get("Status") + "</font>"
return s + t
if __name__ == "__main__":
# Someone is launching this directly
main()
|
gluon/gluoncv2/models/shakedropresnet_cifar.py | naviocean/imgclsmob | 2,649 | 12759165 |
"""
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(mx.autograd.Function):
"""
ShakeDrop function.
Parameters:
----------
p : float
ShakeDrop specific probability (of life) for Bernoulli random variable.
"""
def __init__(self, p):
super(ShakeDrop, self).__init__()
self.p = p
def forward(self, x):
if mx.autograd.is_training():
b = np.random.binomial(n=1, p=self.p)
alpha = mx.nd.random.uniform_like(x.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=-1.0, high=1.0)
y = mx.nd.broadcast_mul(b + alpha - b * alpha, x)
self.save_for_backward(b)
else:
y = self.p * x
return y
def backward(self, dy):
b, = self.saved_tensors
beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=0.0, high=1.0)
return mx.nd.broadcast_mul(b + beta - b * beta, dy)
class ShakeDropResUnit(HybridBlock):
"""
ShakeDrop-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_prob : float
Residual branch life probability.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
life_prob,
**kwargs):
super(ShakeDropResUnit, self).__init__(**kwargs)
self.life_prob = life_prob
self.resize_identity = (in_channels != out_channels) or (strides != 1)
body_class = ResBottleneck if bottleneck else ResBlock
with self.name_scope():
self.body = body_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
# self.shake_drop = ShakeDrop(self.life_prob)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = ShakeDrop(self.life_prob)(x) + identity
# x = self.shake_drop(x) + identity
x = self.activ(x)
return x
class CIFARShakeDropResNet(HybridBlock):
"""
ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_probs : list of float
Residual branch life probability for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
life_probs,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARShakeDropResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
k = 0
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ShakeDropResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
life_prob=life_probs[k]))
in_channels = out_channels
k += 1
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shakedropresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShakeDrop-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
channels_per_layers = [16, 32, 64]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
total_layers = sum(layers)
final_death_prob = 0.5
life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)]
net = CIFARShakeDropResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
life_probs=life_probs,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def shakedropresnet20_cifar10(classes=10, **kwargs):
"""
ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_cifar10", **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
"""
ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_cifar100", **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
"""
ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(shakedropresnet20_cifar10, 10),
(shakedropresnet20_cifar100, 100),
(shakedropresnet20_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shakedropresnet20_cifar10 or weight_count == 272474)
assert (model != shakedropresnet20_cifar100 or weight_count == 278324)
assert (model != shakedropresnet20_svhn or weight_count == 272474)
x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
# y = net(x)
with mx.autograd.record():
y = net(x)
y.backward()
assert (y.shape == (14, classes))
if __name__ == "__main__":
_test()
|
x509_pki/migrations/0010_auto_20211017_0936.py | repleo/bounca | 142 | 12759171 |
# Generated by Django 3.2.7 on 2021-10-17 07:36
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('x509_pki', '0009_auto_20211017_0921'),
]
operations = [
migrations.RemoveField(
model_name='keystore',
name='crl',
),
migrations.CreateModel(
name='CrlStore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('crl', models.TextField(blank=True, null=True, verbose_name='Serialized CRL certificate')),
('certificate', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='x509_pki.certificate')),
],
),
]
|
src/oci/log_analytics/models/log_analytics_em_bridge_summary_report.py | Manny27nyc/oci-python-sdk | 249 | 12759182 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsEmBridgeSummaryReport(object):
"""
Log-Analytics EM Bridge counts summary.
"""
def __init__(self, **kwargs):
"""
Initializes a new LogAnalyticsEmBridgeSummaryReport object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this LogAnalyticsEmBridgeSummaryReport.
:type compartment_id: str
:param active_em_bridge_count:
The value to assign to the active_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type active_em_bridge_count: int
:param creating_em_bridge_count:
The value to assign to the creating_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type creating_em_bridge_count: int
:param needs_attention_em_bridge_count:
The value to assign to the needs_attention_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type needs_attention_em_bridge_count: int
:param deleted_em_bridge_count:
The value to assign to the deleted_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type deleted_em_bridge_count: int
:param total_em_bridge_count:
The value to assign to the total_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type total_em_bridge_count: int
"""
self.swagger_types = {
'compartment_id': 'str',
'active_em_bridge_count': 'int',
'creating_em_bridge_count': 'int',
'needs_attention_em_bridge_count': 'int',
'deleted_em_bridge_count': 'int',
'total_em_bridge_count': 'int'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'active_em_bridge_count': 'activeEmBridgeCount',
'creating_em_bridge_count': 'creatingEmBridgeCount',
'needs_attention_em_bridge_count': 'needsAttentionEmBridgeCount',
'deleted_em_bridge_count': 'deletedEmBridgeCount',
'total_em_bridge_count': 'totalEmBridgeCount'
}
self._compartment_id = None
self._active_em_bridge_count = None
self._creating_em_bridge_count = None
self._needs_attention_em_bridge_count = None
self._deleted_em_bridge_count = None
self._total_em_bridge_count = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this LogAnalyticsEmBridgeSummaryReport.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this LogAnalyticsEmBridgeSummaryReport.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this LogAnalyticsEmBridgeSummaryReport.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this LogAnalyticsEmBridgeSummaryReport.
:type: str
"""
self._compartment_id = compartment_id
@property
def active_em_bridge_count(self):
"""
**[Required]** Gets the active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of ACTIVE enterprise manager bridges.
:return: The active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""
return self._active_em_bridge_count
@active_em_bridge_count.setter
def active_em_bridge_count(self, active_em_bridge_count):
"""
Sets the active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of ACTIVE enterprise manager bridges.
:param active_em_bridge_count: The active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""
self._active_em_bridge_count = active_em_bridge_count
@property
def creating_em_bridge_count(self):
"""
**[Required]** Gets the creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in CREATING state.
:return: The creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""
return self._creating_em_bridge_count
@creating_em_bridge_count.setter
def creating_em_bridge_count(self, creating_em_bridge_count):
"""
Sets the creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in CREATING state.
:param creating_em_bridge_count: The creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""
self._creating_em_bridge_count = creating_em_bridge_count
@property
def needs_attention_em_bridge_count(self):
"""
**[Required]** Gets the needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in NEEDS_ATTENTION state.
:return: The needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""
return self._needs_attention_em_bridge_count
@needs_attention_em_bridge_count.setter
def needs_attention_em_bridge_count(self, needs_attention_em_bridge_count):
"""
Sets the needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in NEEDS_ATTENTION state.
:param needs_attention_em_bridge_count: The needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""
self._needs_attention_em_bridge_count = needs_attention_em_bridge_count
@property
def deleted_em_bridge_count(self):
"""
**[Required]** Gets the deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in DELETED state.
:return: The deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""
return self._deleted_em_bridge_count
@deleted_em_bridge_count.setter
def deleted_em_bridge_count(self, deleted_em_bridge_count):
"""
Sets the deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in DELETED state.
:param deleted_em_bridge_count: The deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""
self._deleted_em_bridge_count = deleted_em_bridge_count
@property
def total_em_bridge_count(self):
"""
**[Required]** Gets the total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of enterprise manager bridges.
:return: The total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""
return self._total_em_bridge_count
@total_em_bridge_count.setter
def total_em_bridge_count(self, total_em_bridge_count):
"""
Sets the total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of enterprise manager bridges.
:param total_em_bridge_count: The total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""
self._total_em_bridge_count = total_em_bridge_count
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
nextgen/bcbio/distributed/lsf.py | bgruening/bcbb | 339 | 12759199 |
"""Commandline interaction with LSF schedulers.
"""
import re
import subprocess
_jobid_pat = re.compile(r"Job <(?P<jobid>\d+)> is")
def submit_job(scheduler_args, command):
"""Submit a job to the scheduler, returning the supplied job ID.
"""
cl = ["bsub"] + scheduler_args + command
status = subprocess.check_output(cl)
match = _jobid_pat.search(status)
return match.group("jobid")
def stop_job(jobid):
cl = ["bkill", jobid]
subprocess.check_call(cl)
def are_running(jobids):
"""Check if all of the submitted job IDs are running.
"""
run_info = subprocess.check_output(["bjobs"])
running = []
for parts in (l.split() for l in run_info.split("\n") if l.strip()):
if len(parts) >= 3:
pid, _, status = parts[:3]
if status.lower() in ["run"]:
running.append(pid)
want_running = set(running).intersection(set(jobids))
return len(want_running) == len(jobids)
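# Minimal usage sketch (illustrative only: assumes an LSF cluster with bsub/bjobs/bkill
# on PATH; the queue name and command below are placeholders, not project defaults).
if __name__ == "__main__":
    job_id = submit_job(["-q", "normal"], ["echo", "hello"])
    print("submitted job %s, running: %s" % (job_id, are_running([job_id])))
    stop_job(job_id)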
|
pronto/xref.py | flying-sheep/pronto | 182 | 12759205 |
"""Cross-reference object definition.
"""
import typing
import fastobo
from .utils.meta import roundrepr, typechecked
__all__ = ["Xref"]
@roundrepr
class Xref(object):
"""A cross-reference to another document or resource.
Cross-references (xrefs for short) can be used to back-up definitions of
entities, synonyms, or to link ontological entities to other resources
they may have been derived from. Although originally intended to provide
links to databases, cross-references in OBO ontologies gained additional
purposes, such as helping for header macros expansion, or being used to
alias external relationships with local unprefixed IDs.
The OBO format version 1.4 expects references to be proper OBO identifiers
that can be translated to actual IRIs, which is a breaking change from the
previous format. Therefore, cross-references are encouraged to be given as
plain IRIs or as prefixed IDs using an ID from the IDspace mapping defined
in the header.
Example:
A cross-reference in the Mammalian Phenotype ontology linking a term
to some related Web resource:
>>> mp = pronto.Ontology.from_obo_library("mp.obo")
>>> mp["MP:0030151"].name
'abnormal buccinator muscle morphology'
>>> mp["MP:0030151"].xrefs
frozenset({Xref('https://en.wikipedia.org/wiki/Buccinator_muscle')})
Caution:
`Xref` instances compare only using their identifiers; this means it
is not possible to have several cross-references with the same
identifier and different descriptions in the same set.
Todo:
Make sure to resolve header macros for xrefs expansion (such as
``treat-xrefs-as-is_a``) when creating an ontology, or provide a
method on `~pronto.Ontology` doing so when called.
"""
id: str
description: typing.Optional[str]
__slots__ = ("__weakref__", "id", "description") # noqa: E0602
@typechecked()
def __init__(self, id: str, description: typing.Optional[str] = None):
"""Create a new cross-reference.
Arguments:
id (str): the identifier of the cross-reference, either as a URL,
a prefixed identifier, or an unprefixed identifier.
description (str or None): a human-readable description of the
cross-reference, if any.
"""
# check the id is valid using fastobo
if not fastobo.id.is_valid(id):
raise ValueError("invalid identifier: {}".format(id))
self.id: str = id
self.description = description
def __eq__(self, other: object) -> bool:
if isinstance(other, Xref):
return self.id == other.id
return False
def __gt__(self, other: object) -> bool:
if isinstance(other, Xref):
return self.id > other.id
return NotImplemented
def __ge__(self, other: object) -> bool:
if isinstance(other, Xref):
return self.id >= other.id
return NotImplemented
def __lt__(self, other: object) -> bool:
if isinstance(other, Xref):
return self.id < other.id
return NotImplemented
def __le__(self, other: object) -> bool:
if isinstance(other, Xref):
return self.id <= other.id
return NotImplemented
def __hash__(self):
return hash(self.id)
|
blesuite/connection_manager.py | decidedlygray/BLESuite | 198 | 12759208 |
from blesuite.pybt.roles import LECentral, LEPeripheral
from blesuite.pybt.core import Connection
from blesuite.pybt.gatt import UUID, AttributeDatabase, Server
from blesuite.pybt.gap import GAP
from blesuite.gatt_procedures import gatt_procedure_write_handle, gatt_procedure_write_handle_async, \
gatt_procedure_read_handle, gatt_procedure_read_handle_async, \
gatt_procedure_read_uuid, gatt_procedure_read_uuid_async, \
gatt_procedure_discover_primary_services, \
gatt_procedure_discover_secondary_services, \
gatt_procedure_discover_characteristics, \
gatt_procedure_discover_includes, \
gatt_procedure_discover_descriptors, gatt_procedure_prepare_write_handle, \
gatt_procedure_prepare_write_handle_async, gatt_procedure_execute_write, \
gatt_procedure_execute_write_async, gatt_procedure_write_command_handle, \
gatt_procedure_read_multiple_handles, \
gatt_procedure_read_multiple_handles_async, \
gatt_procedure_read_blob_handle, gatt_procedure_read_blob_handle_async
from blesuite.smart_scan import blesuite_smart_scan
from blesuite.entities.gatt_device import BLEDevice
from blesuite.event_handler import BTEventHandler
import logging
import gevent
import os
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
ROLE_CENTRAL = 0x00
ROLE_PERIPHERAL = 0x01
PUBLIC_DEVICE_ADDRESS = 0x00
RANDOM_DEVICE_ADDRESS = 0x01
class BLEConnection(object):
"""
BLEConnection is used to represent a connection between the BLEConnection manager
and a BLE device. This object is commonly returned to the user to represent a connection and is passed
to further BLEConnectionManager functions to interact with the connections.
:param address: The address of the peer BLEDevice that the HCI device is connected to.
:param address_type: The address type of the peer BLEDevice [Central = 0x00 | Peripheral = 0x01]
:param connection_handle: The connection handle used to interact with the associated peer BLE device.
:type address: str
:type address_type: int
:type connection_handle: int
"""
def __init__(self, address, address_type, connection_handle=None):
self.address = address
self.address_type = address_type
self.connection_handle = connection_handle
self.interval_min = None
self.interval_max = None
self.mtu = 23 # default as per spec
def __repr__(self):
return '<{} address={}, type={}>'.format(
self.__class__.__name__,
self.address,
{0: "random", 1: "public"}.get(self.address_type, "Unknown")
)
class BLEConnectionManager(object):
"""
BLEConnectionManager is used to manage connections to Bluetooth Low Energy Devices.
The connection manager is associated with an HCI device, such as a Bluetooth USB adapter,
and is responsible for creating the BLE stack and providing a user-friendly interface for
interacting with the BLE stack in order to send and receive packets.
:param adapter: BTLE adapter on host machine to use for connection (defaults to first found adapter). If left blank, the host's default adapter is used.
:param role: Type of role to create for the HCI device [central | peripheral]
:param our_address_type: Type of address for our Bluetooth Adapter. [public | random] (default: "public"). Note: We currently only support static random addresses, not resolvable or non-resolvable private addresses.
:param random_address: If our address type is set to random, supply a random address or one will be randomly generated ("AA:BB:CC:DD:EE:FF") (default: None)
:param psm: Specific PSM (default: 0)
:param mtu: Specific MTU (default: 23 as per spec BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part G] 5.2.1)
:param gatt_server: GATT Server from pybt. Used to assign a custom blesuite.pybt.gatt Server object as the server for a peripheral. Alternatively, by default if the peripheral role is configured, a GATT Server object will be created with no services or characteristics that the user can add to through BLEConnectionManager class methods.
:param event_handler: BTEventHandler class instance that will be called when packets are received by the blesuite.pybt.core packet routing class (SocketHandler).
:param att_operation_event_hook: ATT operation hook functions triggered when the ATT server receives an ATT request
:param att_security_event_hook: ATT security hook functions triggered when the ATT server receives an ATT request and security checks are made
:type att_security_event_hook: blesuite.event_handler.ATTSecurityHook
:type att_operation_event_hook: blesuite.event_handler.ATTEventHook
:type adapter: int
:type role: str
:type our_address_type: str
:type random_address: str
:type psm: int
:type mtu: int
:type gatt_server: Server
:type event_handler: BTEventHandler
"""
def __init__(self, adapter, role, our_address_type="public", random_address=None,
psm=0, mtu=23, gatt_server=None, event_handler=None, att_operation_event_hook=None,
att_security_event_hook=None):
self.role_name = role
self.adapter = adapter
self.requester = None
self.responses = []
self.response_counter = 0
self.psm = psm
self.mtu = mtu
self.gatt_server = gatt_server
self.event_handler = event_handler
self.att_operation_event_hook = att_operation_event_hook
self.att_security_event_hook = att_security_event_hook
self.address = None
self.our_address_type_name = our_address_type
if self.our_address_type_name.lower() == "random":
self.our_address_type = RANDOM_DEVICE_ADDRESS
else:
self.our_address_type = PUBLIC_DEVICE_ADDRESS
if self.our_address_type == RANDOM_DEVICE_ADDRESS and random_address is None:
self.random_address = ':'.join(map(lambda x: x.encode('hex'), os.urandom(6)))
elif self.our_address_type == RANDOM_DEVICE_ADDRESS:
self.random_address = random_address
else:
self.random_address = None
self.central = None
self.stack_connection = None
self.connections = []
if role == 'central':
logger.debug("creating central")
self._create_central()
logger.debug("creating PyBT connection")
self._create_stack_connection(ROLE_CENTRAL)
logger.debug("creating listeners")
self._start_listeners()
elif role == 'peripheral':
logger.debug("creating peripheral role")
self._create_peripheral()
logger.debug("creating PyBT connection")
self._create_stack_connection(ROLE_PERIPHERAL)
logger.debug("creating listeners")
self._start_listeners()
else:
logger.error("Unknown role: %s" % role)
raise RuntimeError("Unknown role: %s" % role)
self.address = self.role.stack.addr
def __enter__(self):
return self
def __del__(self):
if self.stack_connection is not None:
for connection in self.connections:
if self.stack_connection.is_connected(connection.connection_handle):
self.stack_connection.disconnect(connection.connection_handle, 0x16)
self.stack_connection.destroy()
self.stack_connection = None
def __exit__(self, exc_type, exc_val, exc_tb):
logger.debug("Exiting bleConnectionManager. exc_type:%s exc_val:%s exc_tb:%s" % (exc_type, exc_val, exc_tb))
if self.stack_connection is not None:
self.stack_connection.destroy()
self.stack_connection = None
if self.role is not None:
self.role.destroy()
self.role = None
def _create_central(self):
if self.adapter is None:
self.role = LECentral(address_type=self.our_address_type, random=self.random_address,
att_operation_event_hook=self.att_operation_event_hook)
else:
self.role = LECentral(adapter=self.adapter, address_type=self.our_address_type, random=self.random_address,
att_operation_event_hook=self.att_operation_event_hook)
def _create_peripheral(self):
if self.gatt_server is None:
self.attribute_db = AttributeDatabase(event_handler=self.att_security_event_hook)
self.gatt_server = Server(self.attribute_db)
self.gatt_server.set_mtu(self.mtu)
if self.adapter is None:
self.role = LEPeripheral(self.gatt_server, mtu=self.mtu, address_type=self.our_address_type,
random=self.random_address,
att_operation_event_hook=self.att_operation_event_hook)
else:
self.role = LEPeripheral(self.gatt_server, adapter=self.adapter, mtu=self.mtu,
address_type=self.our_address_type, random=self.random_address,
att_operation_event_hook=self.att_operation_event_hook)
def _create_stack_connection(self, role_type):
if self.event_handler is None:
self.event_handler = BTEventHandler(self)
self.stack_connection = Connection(self.role, role_type, self.event_handler)
def _start_listeners(self):
self.stack_connection.start()
def get_address(self):
""" Get the address of the HCI device represented by the BLEConnectionManager.
:return: The HCI device address
:rtype: str
"""
return self.address
def get_discovered_devices(self):
"""
Get a dictionary of addresses seen during a scan and the associated advertising data.
:return: Dictionary of seen addresses and advertising data
:rtype: dict {"<address>":(<addressTypeInt>, "<advertisingData>")}
"""
return self.stack_connection.seen
def set_event_handler(self, event_class):
"""
Set the BTEventHandler for the pybt.core.SocketHandler class that will trigger when a Bluetooth Event
is received by the stack.
:param event_class: Event handler class instance.
:type event_class: BTEventHandler
:return: Success state
:rtype: bool
"""
logger.debug("Trying to set event handler")
self.event_handler = event_class
if self.stack_connection.socket_handler is not None:
logger.debug("Stack connection found, setting event handler")
self.stack_connection.set_event_handler(event_class)
return True
return False
def set_att_operation_hook(self, event_class):
"""
Set the ATTEventHook for the pybt.att.AttributeProtocol class that will trigger when an ATT operation
against the ATT database running locally is received.
:param event_class: ATT event class hook instance.
:type event_class: ATTEventHook
:return: Success state
:rtype: bool
"""
logger.debug("Trying to set ATT operation hook")
self.att_operation_event_hook = event_class
self.role.att.event_handler = self.att_operation_event_hook
return True
def set_att_security_hook(self, event_class):
"""
Set the ATTSecurityHook for the pybt.gatt.AttributeDatabase class that will trigger when a security
check against an ATT operation acting on the ATT database occurs. These checks cover encryption,
authentication, and authorization.
:param event_class: ATT security event hook class instance.
:type event_class: ATTSecurityHook
:return: Success state
:rtype: bool
"""
logger.debug("Trying to set ATT security hook")
self.att_security_event_hook = event_class
if self.gatt_server is None:
logger.debug("No GATT server running, setting security hook failed.")
return False
self.gatt_server.db.att_security_hooks = self.att_security_event_hook
return True
def is_connected(self, connection):
""" Return whether the specified connection is connected to the peer device.
:return: Return connection status
:rtype: bool
"""
return self.stack_connection.is_connected(connection.connection_handle)
def init_connection(self, address, address_type):
"""
Create BLEConnection object that represents the host's connection to a BLE peripheral.
:param address: BD_ADDR of target BLE Peripheral
:param address_type: Address type of target BLE Peripheral [public | random]
:type address: string
:type address_type: string
:return: Return BLEConnection object that is used in any communication function.
:rtype: BLEConnection
"""
address = address.upper()
if address_type == "public":
address_type = PUBLIC_DEVICE_ADDRESS
        elif address_type in ("random", "private"):
address_type = RANDOM_DEVICE_ADDRESS
ble_connection = BLEConnection(address, address_type)
self.connections.append(ble_connection)
return ble_connection
def get_bleconnection_from_connection_handle(self, connection_handle):
"""
Lookup a BLEConnection based on a supplied connection handle value.
:param connection_handle: Connection handle used to look up an existing BLEConnection
:type connection_handle: int
:return: BLEConnection or None
:rtype: BLEConnection or None
"""
for connection in self.connections:
if connection.connection_handle is not None and connection.connection_handle == connection_handle:
return connection
return None
def connect(self, ble_connection, timeout=15):
"""
Initiate a connection with a peer BLEDevice.
:param ble_connection: BLEConnection that represents the connection between our HCI device and the peer
:type ble_connection: BLEConnection
:param timeout: Connection timeout in seconds (default: 15)
:type timeout: int
:return: Connected status
:rtype: bool
"""
import time
start = time.time()
if not self.stack_connection.is_connected(ble_connection.connection_handle):
request = self.stack_connection.connect(ble_connection.connection_handle, ble_connection.address,
kind=ble_connection.address_type)
while not request.has_response():
if timeout is not None and time.time() - start >= timeout:
logger.debug("Connection failed: Connection timeout reached.")
return False
logger.debug("Is not connected")
gevent.sleep(1)
ble_connection.connection_handle = request.response.conn_handle
logger.debug("Connected")
return True
def disconnect(self, connection, reason=0x16):
"""
Disconnect from a peer BLE device.
:param connection: BLEConnection to disconnect
:type connection: BLEConnection
:param reason: The reason for the disconnection (default: 0x16 - Connection terminated by local host). Reasons defined in BLUETOOTH SPECIFICATION Version 5.0 | Vol 2, Part E page 777
:type reason: int
"""
self.stack_connection.disconnect(connection.connection_handle, reason)
def pair(self, ble_connection, timeout=15):
"""
Initiate pairing with a peer BLE device. This method is blocking and will wait
until a paired connection is received, pairing fails, or the timeout is reached.
If custom pairing request parameters are required, configure
the parameters prior to calling this function.
:param ble_connection: The BLEConnection to initiate pairing on
:type ble_connection: BLEConnection
:param timeout: Pairing timeout in seconds (default: 15)
:type timeout: int
:return: Pairing status
:rtype: bool
"""
import time
self.initiate_pairing(ble_connection)
start = time.time()
while not self.role.smp.get_connection_encryption_status(ble_connection.connection_handle):
if self.role.smp.did_pairing_fail(ble_connection.address):
logger.debug("Pairing Failed")
return False
if timeout is not None and time.time() - start >= timeout:
return False
logger.debug("Pairing in progress. Pairing Failed: %s " % self.role.smp.did_pairing_fail(ble_connection.address))
gevent.sleep(1)
logger.debug("Paired")
return True
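    # Usage sketch (illustrative only, not executed): assuming ``manager`` is an
    # already-initialized BLEConnectionManager acting as a central and the peer
    # address below is a hypothetical placeholder, a connect-then-pair flow looks
    # like:
    #
    #   connection = manager.init_connection("AA:BB:CC:DD:EE:FF", "public")
    #   if manager.connect(connection, timeout=15):
    #       if manager.pair(connection, timeout=15):
    #           print("paired and link encrypted")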
def initiate_pairing(self, ble_connection):
"""
Send pairing request to peer device. This is meant as an asynchronous way for a user to initiate pairing
and manage the connection while waiting for the pairing process to complete. Use BLEConnectionManager.pair
for a synchronous pairing procedure.
:param ble_connection: The BLEConnection to initiate pairing on
:type ble_connection: BLEConnection
:return:
:rtype:
"""
if not self.is_connected(ble_connection):
self.connect(ble_connection)
self.role.smp.send_pairing_request(ble_connection.address, ble_connection.connection_handle)
def is_pairing_in_progress(self, ble_connection):
"""
Retrieve pairing status of BLEConnection
:param ble_connection: The BLEConnection to view the pairing status of
:type ble_connection: BLEConnection
:return: Status of BLE pairing
:rtype: bool
"""
return self.role.smp.is_pairing_in_progress(ble_connection.address)
def did_pairing_fail(self, ble_connection):
"""
Lookup whether a pairing failed status was triggered
:param ble_connection: The BLEConnection to check for a pairing failure
:type ble_connection: BLEConnection
:return: Pairing failure status (True means failure was triggered)
:rtype: bool
"""
return self.role.smp.did_pairing_fail(ble_connection.address)
def is_connection_encrypted(self, ble_connection):
"""
Retrieve BLEConnection encryption status
:param ble_connection: The BLEConnection to check the encryption status of
:type ble_connection: BLEConnection
:return: Encryption status
:rtype: bool
"""
return self.role.smp.get_connection_encryption_status(ble_connection.connection_handle)
def resume_connection_encryption(self, ble_connection):
"""
Initiate BLEConnection encryption with encryption keys present in the Security Manager's LongTermKeyDatabase.
        Encryption key look-up is done based on the peer device's address.
:param ble_connection: The BLEConnection to resume encryption on
:type ble_connection: BLEConnection
        :return: Result of encryption initiation with existing keys (True if encryption initiation was successfully started, False if encryption keys were not found)
:rtype: bool
"""
result = self.role.smp.initiate_encryption_with_existing_keys(ble_connection.address,
ble_connection.address_type,
ble_connection.connection_handle, self.address,
self.our_address_type, self.role)
return result
def get_security_manager_long_term_key_database(self):
"""
Retrieve the LongTermKeyDatabase from the Security Manager
:return: LongTermKeyDatabase from the Security Manager
:rtype: blesuite.pybt.sm.LongTermKeyDatabase
"""
return self.role.smp.long_term_key_db
def add_key_to_security_manager_long_term_key_database(self, address, address_type, ltk, ediv, rand, irk, csrk, security_mode,
security_level):
"""
Add an entry to the LongTermKeyDatabase that will be used for encryption key lookups when encryption
on a BLEConnection is initiated
:param address: Address of peer device (byte form, big-endian)
:type address: str
:param address_type: Address type of peer device
:type address_type: int
:param ltk: Long term key for peer (big-endian)
:type ltk: str
:param ediv: EDIV for peer. Required for LE Legacy encryption resumption
:type ediv: int
:param rand: Encryption Random for peer (big-endian). Required for LE Legacy encryption resumption
:type rand: str
:param irk: IRK for peer (big-endian)
:type irk: str
:param csrk: CSRK for peer
:type csrk: str
:param security_mode: Security mode associated with encryption keys. This mode will be applied to a connection encrypted with these keys.
:type security_mode: int
:param security_level: Security level associated with encryption keys. This level will be applied to a connection encrypted with these keys.
:type security_level: int
:return:
:rtype:
"""
self.role.smp.long_term_key_db.add_long_term_key_entry(address, address_type,
ltk, ediv, rand, irk, csrk, security_mode,
security_level)
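    # Usage sketch (illustrative only, not executed): the key material below is a
    # hypothetical placeholder; keys are expected as big-endian byte strings as
    # described in the docstring above.
    #
    #   ltk = "000102030405060708090a0b0c0d0e0f".decode('hex')
    #   manager.add_key_to_security_manager_long_term_key_database(
    #       "\xaa\xbb\xcc\xdd\xee\xff", 0x00, ltk, 0, "\x00" * 8,
    #       "\x00" * 16, "\x00" * 16, 1, 1)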
def export_security_manager_long_term_key_database_for_storage(self):
"""
        Export the Security Manager LongTermKeyDatabase as a list of dictionaries containing BLE
encryption properties (LTK, EDIV, random,
CSRK, IRK, security mode, security level) with integers and hex encoded strings
:return: LongTermKeyDatabase as a list of dictionaries with integers and hex encoded strings (user-friendly exportable version)
        :rtype: list of dict
"""
ltk_db = self.role.smp.long_term_key_db.get_long_term_key_database()
for entry in ltk_db:
temp = entry['address']
if temp is not None:
temp = temp.encode('hex')
entry['address'] = temp
temp = entry['ltk']
if temp is not None:
temp = temp.encode('hex')
entry['ltk'] = temp
temp = entry['rand']
if temp is not None:
temp = temp.encode('hex')
entry['rand'] = temp
temp = entry['irk']
if temp is not None:
temp = temp.encode('hex')
entry['irk'] = temp
temp = entry['csrk']
if temp is not None:
temp = temp.encode('hex')
entry['csrk'] = temp
return ltk_db
def import_long_term_key_database_to_security_manager(self, long_term_key_database):
"""
Import LongTermKeyDatabase and apply it to the Security Manager. Import database format is identical
to the LongTermKeyDatabase export format with integers and hex encoded strings. The function will perform
some input validation to ensure proper encoding and value types.
:param long_term_key_database: List of dictionaries of LongTermKeyDatabase entries with integers and hex encoded strings
:type long_term_key_database: list of dict
:return:
:rtype:
"""
import blesuite.utils.validators as validator
for entry in long_term_key_database:
keys = entry.keys()
if 'address' in keys:
peer_address = entry['address'].decode('hex')
else:
peer_address = "00" * 6
if 'address_type' in keys:
peer_address_type = entry['address_type']
else:
peer_address_type = 0
if 'ltk' in keys:
ltk = validator.validate_ltk(entry['ltk']).decode('hex')
else:
raise validator.InvalidSMLTK(None)
if 'ediv' in keys:
ediv = entry['ediv']
else:
ediv = 0
if 'rand' in keys:
rand = validator.validate_rand(entry['rand']).decode('hex')
else:
rand = '\x00' * 8
if 'irk' in keys:
irk = validator.validate_irk(entry['irk']).decode('hex')
else:
irk = '\x00' * 16
if 'csrk' in keys:
csrk = validator.validate_csrk(entry['csrk']).decode('hex')
else:
csrk = '\x00' * 16
if 'security_mode' in keys:
mode = entry['security_mode']
else:
mode = 1
if 'security_level' in keys:
level = entry['security_level']
else:
level = 1
mode, level = validator.validate_att_security_mode(mode, level)
self.role.smp.long_term_key_db.add_long_term_key_entry(peer_address, peer_address_type, ltk, ediv, rand,
irk, csrk, mode, level)
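    # Usage sketch (illustrative only, not executed): a single hypothetical entry in
    # the same hex-encoded form produced by
    # export_security_manager_long_term_key_database_for_storage.
    #
    #   entry = {'address': 'aabbccddeeff', 'address_type': 0,
    #            'ltk': '000102030405060708090a0b0c0d0e0f', 'ediv': 0,
    #            'rand': '0000000000000000', 'irk': '00' * 16, 'csrk': '00' * 16,
    #            'security_mode': 1, 'security_level': 1}
    #   manager.import_long_term_key_database_to_security_manager([entry])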
def get_security_manager_protocol_default_pairing_parameters(self):
"""
Get the default pairing parameters that will be applied to Security Managers by default.
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
(Security Managers are created per BLE connection and can be modified independently)
:return: {io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution}
:rtype: dict
"""
return self.role.smp.get_default_pairing_parameters()
def set_security_manager_protocol_default_pairing_parameters(self, default_io_cap=0x03, default_oob=0x00,
default_mitm=0x00,
default_bond=0x01, default_lesc=0x00,
default_keypress=0x00,
default_ct2=0x01, default_rfu=0x00,
default_max_key_size=16,
default_initiator_key_distribution=0x01,
default_responder_key_distribution=0x01):
"""
Set the default pairing parameters that will be applied to Security Managers by default.
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
(Security Managers are created per BLE connection and can be modified independently)
:param default_io_cap: IO Capabilities (default: 0x03 - No Input, No Output)
:type default_io_cap: int
:param default_oob: Out-of-band Data present and available (default: 0x00)
:type default_oob: int
        :param default_mitm: Request man-in-the-middle pairing protections (default: 0x00)
:type default_mitm: int
:param default_bond: Request bonding (default: 0x01)
:type default_bond: int
:param default_lesc: LE Secure Connections supported (default: 0x00)
:type default_lesc: int
:param default_keypress: Keypress notifications (default: 0x00)
:type default_keypress: int
:param default_ct2: CT2 (default: 0x01)
:type default_ct2: int
:param default_rfu: Reserved for future use bits (default: 0x00)
:type default_rfu: int
:param default_max_key_size: Max encryption key size (default: 16)
:type default_max_key_size: int
:param default_initiator_key_distribution: Requested keys to be sent by the initiator (central) (default: 0x01)
:type default_initiator_key_distribution: int
:param default_responder_key_distribution: Requested keys to be sent by the responder (peripheral) (default: 0x01)
:type default_responder_key_distribution: int
:return:
:rtype:
"""
self.role.smp.set_default_pairing_parameters(default_io_cap, default_oob, default_mitm, default_bond,
default_lesc, default_keypress, default_ct2, default_rfu,
default_max_key_size, default_initiator_key_distribution,
default_responder_key_distribution)
def get_security_manager_protocol_pairing_parameters_for_connection(self, ble_connection):
"""
Get the default pairing parameters for the Security Manager associated with a BLEConnection (based on the
peer address).
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
:param ble_connection: BLEConnection to modify Security Manager pairing parameters of
:type ble_connection: BLEConnection
:return: {io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution}
:rtype: dict
"""
return self.role.smp.get_pairing_parameters_for_connection(ble_connection.address)
def set_security_manager_protocol_pairing_parameters_for_connection(self, ble_connection, io_cap=0x03, oob=0x00,
mitm=0x00,
bond=0x01, lesc=0x00, keypress=0x0, ct2=0x01,
rfu=0x00, max_key_size=16,
initiator_key_distribution=0x01,
responder_key_distribution=0x01):
"""
Set the default pairing parameters for the Security Manager associated with a BLEConnection (based on the
peer address).
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
:param ble_connection: BLEConnection to modify Security Manager pairing parameters of
:type ble_connection: BLEConnection
:param io_cap: IO Capabilities (default: 0x03 - No Input, No Output)
:type io_cap: int
:param oob: Out-of-band Data present and available (default: 0x00)
:type oob: int
        :param mitm: Request man-in-the-middle pairing protections (default: 0x00)
:type mitm: int
:param bond: Request bonding (default: 0x01)
:type bond: int
:param lesc: LE Secure Connections supported (default: 0x00)
:type lesc: int
:param keypress: Keypress notifications (default: 0x00)
:type keypress: int
:param ct2: CT2 (default: 0x01)
:type ct2: int
:param rfu: Reserved for future use bits (default: 0x00)
:type rfu: int
:param max_key_size: Max encryption key size (default: 16)
:type max_key_size: int
:param initiator_key_distribution: Requested keys to be sent by the initiator (central) (default: 0x01)
:type initiator_key_distribution: int
:param responder_key_distribution: Requested keys to be sent by the responder (peripheral) (default: 0x01)
:type responder_key_distribution: int
:return: Success status of pairing parameter configuration (False is returned if BLEConnection does not have a valid connection or a security manager set)
:rtype: bool
"""
return self.role.smp.set_pairing_parameters_for_connection(ble_connection.address, io_cap, oob, mitm,
bond, lesc, keypress, ct2, rfu, max_key_size,
initiator_key_distribution,
responder_key_distribution)
def decode_gap_data(self, data):
"""
Decode GAP data into GAP class object
:param data: GAP binary data
:type data: str
:return: GAP object containing the GAP data that has been parsed
:rtype: blesuite.pybt.gap.GAP
"""
gap = GAP()
try:
gap.decode(data)
except Exception as e:
if "Data too short" in str(e):
logger.debug("Data too short, leaving off malformed data")
else:
raise e
return gap
def generate_gap_data_dict(self, gap):
"""
Generates a dictionary of user-friendly strings that describe the GAP data in the supplied GAP object.
:param gap: GAP object to retrieve data from
:type gap: blesuite.pybt.gap.GAP
:return: Dictionary of readable strings that represent the GAP data stored in the object
:rtype: dict
"""
return gap.gap_dict()
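    # Usage sketch (illustrative only, not executed): decode the raw advertising
    # bytes reported for a scanned device and print a readable summary.
    #
    #   gap = manager.decode_gap_data(advertising_data)
    #   print(manager.generate_gap_data_dict(gap))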
# Scanning/Discovery Functions
def scan(self, timeout):
"""
        Carry out a BLE scan for the specified timeout and return the discovered devices.
        :param timeout: Scan timeout in milliseconds (the scan loop compares elapsed time in milliseconds)
:type timeout: int
:return: Discovered devices
:rtype: dict
"""
import time
self.start_scan()
start = time.time() * 1000
logger.debug("Starting sleep loop")
# comparing time in ms
while ((time.time() * 1000) - start) < timeout:
logger.debug("Scanning...")
gevent.sleep(1)
self.stop_scan()
logger.debug("Done scanning!")
discovered_devices = self.get_discovered_devices()
return discovered_devices
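    # Usage sketch (illustrative only, not executed): scan for roughly ten seconds
    # (the timeout is compared in milliseconds) and walk the results, whose format
    # is described in get_discovered_devices.
    #
    #   discovered = manager.scan(10000)
    #   for address, (address_type, advertising_data) in discovered.items():
    #       print("%s (type %d)" % (address, address_type))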
def start_scan(self):
"""
Enable scanning on HCI device.
:return:
:rtype:
"""
self.stack_connection.scan("on")
def stop_scan(self):
"""
Stop scanning on HCI device
:return:
:rtype:
"""
self.stack_connection.scan("off")
def advertise_and_wait_for_connection(self):
"""
Begin advertising with the HCI device and wait for a connection to be established.
:return: Status of connection with a peer device and the BLEConnection
:rtype: tuple - bool, (BLEConnection | None)
"""
self.start_advertising()
while self.is_advertising():
gevent.sleep(1)
if len(self.stack_connection.connection_statuses.keys()) > 0:
connection_handle = self.stack_connection.connection_statuses.keys()[0]
peer_address = self.stack_connection.peer_addresses_by_connection_handle[connection_handle]
peer_address_type = self.stack_connection.connected_addr_type_by_connection_handle[connection_handle]
return True, BLEConnection(peer_address, peer_address_type, connection_handle=connection_handle)
else:
logger.error("Advertising stopped and no connections are present. Something went wrong.")
return False, None
def start_advertising(self):
"""
Enable advertising on HCI device.
:return:
:rtype:
"""
self.stack_connection.start_advertising()
def stop_advertising(self):
"""
Disable advertising on HCI device.
:return:
:rtype:
"""
self.stack_connection.stop_advertising()
def is_advertising(self):
"""
Retrieve advertising status of HCI device.
:return: Status of advertising
:rtype: bool
"""
return self.stack_connection.is_advertising()
def set_advertising_data(self, data):
"""
Set advertising data.
:param data: Data to include in advertising packets
:type data: str
:return:
:rtype:
"""
self.stack_connection.set_advertising_data(data)
def set_scan_response_data(self, data):
"""
Set scan response data.
:param data: Data to return when a scan packet is received.
:type data: str
:return:
:rtype:
"""
self.stack_connection.set_scan_response_data(data)
def set_advertising_parameters(self, advertisement_type, channel_map, interval_min, interval_max,
destination_addr, destination_addr_type):
"""
Set advertising parameters. See: BLUETOOTH SPECIFICATION Version 5.0 | Vol 2, Part E page 1251
:param advertisement_type: Advertising packet type (see blesuite.utils.GAP_ADV_TYPES)
:type advertisement_type: int
:param channel_map: Bit field that indicates the advertising channels to use. (Channel 37 - 0x01, Channel 38 - 0x02, Channel 39 - 0x04, all channels - 0x07)
:type channel_map: int
        :param interval_min: Minimum advertising interval for undirected and low duty cycle directed advertising. (Range 0x0020 - 0x4000, default 0x0800 or 1.28 seconds. Time conversion = interval * 0.625ms)
:type interval_min: int
        :param interval_max: Maximum advertising interval for undirected and low duty cycle directed advertising. (Range 0x0020 - 0x4000, default 0x0800 or 1.28 seconds. Time conversion = interval * 0.625ms)
:type interval_max: int
:param destination_addr: Destination address for directed advertising (set to 00:00:00:00:00:00 if using undirected advertising)
:type destination_addr: str
:param destination_addr_type: Destination address type (set to 0x00 if using undirected advertising)
:type destination_addr_type: int
:return:
:rtype:
"""
self.stack_connection.set_advertising_parameters(advertisement_type, channel_map, interval_min, interval_max,
destination_addr, destination_addr_type)
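    # Usage sketch (illustrative only, not executed): configure undirected
    # advertising on all three channels and start it. The advertisement_type value
    # 0x00 is assumed to mean connectable undirected advertising and the payload is
    # a minimal GAP "Flags" structure; both are placeholders.
    #
    #   manager.set_advertising_parameters(0x00, 0x07, 0x0800, 0x0800,
    #                                      "00:00:00:00:00:00", 0x00)
    #   manager.set_advertising_data("\x02\x01\x06")
    #   manager.start_advertising()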
def set_local_name(self, name, enforce_null_termination=True):
"""
Set the local name of the HCI device. (Bluetooth Spec says the value needs to be null terminated. If it is
intended to write a string that is not null terminated, then set the enforcement flag to False).
:param name: Local name to write to HCI device
:type name: str
:param enforce_null_termination: Flag to enforce null termination (default: True)
:type enforce_null_termination: bool
:return:
:rtype:
"""
if enforce_null_termination:
if len(name) != 248:
padding = 248 - len(name)
name = name + ('\0' * padding)
self.stack_connection.set_local_name(name)
def get_gatt_server(self):
"""
Retrieve the GATT server for the BLEConnectionManager instance.
:return: GATT Server
:rtype: blesuite.pybt.gatt.Server
"""
return self.gatt_server
def set_server_mtu(self, mtu):
"""
Configures the MTU (max transmission unit) on the GATT server and ATT class instance. MTU is used
to restrict the size of data the stack returns in ATT packets. Note: The MTU used by the class
is determined by the MTUs exchanged by both connected BLE devices (uses the minimum value of the
exchanged MTUs).
:param mtu: MTU size in bytes (Bluetooth Spec default is 23 bytes)
:type mtu: int
:return:
:rtype:
"""
self.mtu = mtu
self.role.att.set_mtu(mtu)
def get_server_mtu(self):
"""
Returns the MTU size from the GATT server.
:return: GATT server MTU (bytes)
:rtype: int
"""
if self.role.att.gatt_server is not None:
return self.role.att.gatt_server.mtu
def initialize_gatt_server_from_ble_device(self, ble_device, use_handles_from_ble_device=False):
"""
Initializes the GATT server based on a supplied BLEDevice entity. All services, includes, characteristics,
and descriptors are retrieved from the BLEDevice entity and added to the GATT server using the
properties and permissions configured in the BLEDevice object.
:param ble_device: BLEDevice object to replicate with the GATT server
:type ble_device: BLEDevice
        :param use_handles_from_ble_device: Flag to indicate that the GATT server should use the attribute handles specified in each BLE entity within the BLEDevice. If set to False (default), the GATT server will automatically assign handles in the order that entities are added to the server.
:type use_handles_from_ble_device: bool
:return:
:rtype:
"""
        from blesuite.pybt.gatt import GATTService, GATTCharacteristic, GATTCharacteristicDescriptorDeclaration,\
GATTInclude, UUID
if self.gatt_server is None:
att_db = AttributeDatabase()
self.gatt_server = Server(att_db)
self.gatt_server.set_mtu(self.mtu)
for service in ble_device.get_services():
gatt_service = GATTService(UUID(service.attribute_type), UUID(service.uuid))
gatt_service.start = service.start
gatt_service.end = service.end
gatt_service.handle = service.start
for incl in service.get_includes():
include_1 = GATTInclude(incl.included_service_att_handle, incl.included_service_end_group_handle,
UUID(incl.included_service_uuid),
incl.include_definition_attribute_properties,
incl.include_definition_attribute_read_permission,
incl.include_definition_attribute_write_permission,
incl.include_definition_attribute_require_authorization)
include_1.handle = incl.handle
gatt_service.add_include(include_1)
for characteristic in service.get_characteristics():
# create general characteristic (note: this method doesn't apply permissions and properties to the
# characteristic declaration descriptor)
characteristic_1 = GATTCharacteristic(characteristic.value, characteristic.gatt_properties,
UUID(characteristic.uuid),
characteristic.characteristic_value_attribute_properties,
characteristic.characteristic_value_attribute_read_permission,
characteristic.characteristic_value_attribute_write_permission,
characteristic.characteristic_value_attribute_require_authorization)
# update characteristic declaration descriptor with configured permissions and authz
characteristic_1.declaration.attribute_properties = characteristic.characteristic_definition_attribute_properties
characteristic_1.declaration.attribute_read_permission = characteristic.characteristic_definition_attribute_read_permission
characteristic_1.declaration.attribute_write_permission = characteristic.characteristic_definition_attribute_write_permission
characteristic_1.declaration.require_authorization = characteristic.characteristic_definition_attribute_require_authorization
characteristic_1.declaration.handle = characteristic.handle
characteristic_1.declaration.value_attribute_handle = characteristic.value_handle
characteristic_1.value_declaration.handle = characteristic.value_handle
for descriptor in characteristic.get_descriptors():
# characteristic declaration is already created when we created the characteristic attribute
if descriptor.type == 0x2803:
                        continue
descriptor_1 = GATTCharacteristicDescriptorDeclaration(UUID(descriptor.uuid),
descriptor.value,
descriptor.characteristic_descriptor_attribute_properties,
descriptor.characteristic_descriptor_attribute_read_permission,
descriptor.characteristic_descriptor_attribute_write_permission,
descriptor.characteristic_descriptor_attribute_require_authorization)
descriptor_1.handle = descriptor.handle
characteristic_1.add_descriptor(descriptor_1)
gatt_service.add_characteristic(characteristic_1)
self.gatt_server.add_service(gatt_service)
self.gatt_server.refresh_database(calculate_handles=(not use_handles_from_ble_device))
def set_extended_inquiry_response(self, fec_required=0, formatted_eir_data=None):
"""
Set the extended inquiry response on the HCI device.
:param fec_required: FEC required (default: 0)
        :type fec_required: int
:param formatted_eir_data: Formatted extended inquiry response data (default: None)
:type formatted_eir_data: str
:return:
:rtype:
"""
self.stack_connection.set_eir_response(fec_required=fec_required, formatted_eir_data=formatted_eir_data)
def read_remote_used_features(self, connection):
"""
Issues a read remote used features command to the connected peer device.
:param connection: BLEConnection of target connection
:type connection: BLEConnection
:return:
:rtype:
"""
self.stack_connection.read_remote_used_features(connection.connection_handle)
return
# ATT Packets / GATT Procedures
def exchange_mtu(self, connection, mtu, timeout=15 * 1000):
"""
Sends Exchange MTU packet using the supplied BLEConnection object
and returns a GATTRequest object containing the request or any received errors.
Synchronous method. Note: Sending this packet as a peripheral will not
change the MTU configured on the GATT server.
:param connection: BLEConnection with connection to target device
:param mtu: Desired MTU (bytes)
        :param timeout: Timeout for exchange MTU response (in milliseconds)
:type connection: BLEConnection
:type mtu: int
:rtype: blesuite.pybt.core.GATTRequest
"""
request = self.stack_connection.exchange_mtu_sync(mtu, connection.connection_handle, timeout=timeout)
if request.has_error():
logger.debug("Exchange MTU Response Error")
else:
logger.debug("Exchange MTU Response Data(str): %s" % request.response.data)
if not request.has_error() and request.has_response():
connection.mtu = mtu
return request
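    # Usage sketch (illustrative only, not executed): assumes ``connection`` is an
    # established BLEConnection and 185 is an arbitrary example MTU.
    #
    #   request = manager.exchange_mtu(connection, 185)
    #   if request.has_error():
    #       print("MTU exchange failed")
    #   elif request.has_response():
    #       print("peer responded: %r" % request.response.data)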
def gatt_discover_primary_services(self, connection, device=None):
"""
Discover primary GATT services of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_primary_services(self, connection, device)
def gatt_discover_secondary_services(self, connection, device=None):
"""
Discover secondary GATT services of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_secondary_services(self, connection, device)
def gatt_discover_characteristics(self, connection, device=None):
"""
Discover GATT characteristics of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_characteristics(self, connection, device)
def gatt_discover_includes(self, connection, device=None):
"""
Discover GATT service includes of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_includes(self, connection, device)
def gatt_discover_descriptors(self, connection, device):
"""
Discover GATT characteristic descriptors of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
return gatt_procedure_discover_descriptors(self, connection, device)
def smart_scan(self, connection, device=None, look_for_device_info=True, attempt_desc_read=False,
timeout=15 * 1000):
"""
        Initiate a BLE Smart Scan, which is an all-inclusive way to scan a BLE peripheral for all
        services, includes, characteristics, and descriptors. The scan can also attempt to read from each
        attribute handle discovered during the scan (regardless of the GATT properties returned by the server) in
        order to quickly view data exposed by the device.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
        :param look_for_device_info: Flag to indicate the scan should look up several basic types of device information based on UUIDs defined by the Bluetooth Special Interest Group (default: True)
:type look_for_device_info: bool
:param attempt_desc_read: Flag to indicate the scan should attempt to read from each attribute discovered during the scan (default: False). Note: This may significantly slow down the scan. If the target peripheral disconnects, the scan will attempt to reconnect to the server.
:type attempt_desc_read: bool
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return blesuite_smart_scan(self, connection, device, look_for_device_info=look_for_device_info,
attempt_desc_read=attempt_desc_read, timeout=timeout)
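    # Usage sketch (illustrative only, not executed): run a smart scan against an
    # established connection and walk the populated BLEDevice.
    #
    #   device = manager.smart_scan(connection)
    #   for service in device.get_services():
    #       print("service %s" % service.uuid)
    #       for characteristic in service.get_characteristics():
    #           print("  characteristic %s" % characteristic.uuid)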
def gatt_write_handle(self, connection, handle, data, timeout=15 * 1000):
"""
Send an ATT Write request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_write_handle(self.stack_connection, connection.connection_handle, handle,
data, timeout=timeout)
def gatt_write_handle_async(self, connection, handle, data, timeout=15 * 1000):
"""
Send an ATT Write request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_write_handle_async(self.stack_connection, connection.connection_handle, handle, data,
timeout=timeout)
def gatt_write_command_handle(self, connection, handle, data):
"""
Send an ATT Write Command request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is an asynchronous call that will send the request to the peer device. No GATTRequest
will be generated since this command should not ever receive a response from the peer.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
"""
gatt_procedure_write_command_handle(self.stack_connection, connection.connection_handle, handle, data)
def gatt_prepare_write_handle(self, connection, handle, data, offset, timeout=15 * 1000):
"""
Send an ATT Prepare Write request to the peer device associated with the supplied BLEConnection, attribute
handle, offset, and data. This is a synchronous call that will wait for either a successful response,
error response,
or the specified timeout (milliseconds) to be reached.
Note: Prepare write is used in conjunction with execute write to write a large set of data.
The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param offset: Offset to write the data
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_prepare_write_handle(self.stack_connection, connection.connection_handle, handle,
data, offset, timeout=timeout)
def gatt_prepare_write_handle_async(self, connection, handle, data, offset, timeout=15 * 1000):
"""
Send an ATT Prepare Write request to the peer device associated with the supplied BLEConnection, attribute
handle, offset, and data. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
Note: Prepare write is used in conjunction with execute write to write a large set of data.
The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param offset: Offset to write the data
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_prepare_write_handle_async(self.stack_connection, connection.connection_handle,
handle, data, offset, timeout=timeout)
def gatt_execute_write(self, connection, flags, timeout=15 * 1000):
"""
Send an ATT Execute Write request to the peer device associated with the supplied BLEConnection and flag.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
Note: Execute write is used in conjunction with prepare write
to write a large set of data. The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
        :param flags: Specifies which execute write operation should be performed (0x00 - Cancel all prepared writes, 0x01 - Immediately write all pending prepared values)
:type flags: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_execute_write(self.stack_connection, connection.connection_handle, flags, timeout=timeout)
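    # Usage sketch (illustrative only, not executed): a long-write sequence built
    # from two prepare writes followed by an execute write. The handle 0x000e and
    # the payload split are hypothetical.
    #
    #   manager.gatt_prepare_write_handle(connection, 0x000e, "first-half", 0)
    #   manager.gatt_prepare_write_handle(connection, 0x000e, "second-half", 10)
    #   manager.gatt_execute_write(connection, 0x01)  # 0x01 - write pending values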
def gatt_execute_write_async(self, connection, flags, timeout=15 * 1000):
"""
Send an ATT Execute Write request to the peer device associated with the supplied BLEConnection and flag.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
Note: Execute write is used in conjunction with prepare write
to write a large set of data. The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
        :param flags: Specifies which execute write operation should be performed (0x00 - Cancel all prepared writes, 0x01 - Immediately write all pending prepared values)
:type flags: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_execute_write_async(self.stack_connection, connection.connection_handle, flags,
timeout=timeout)
def gatt_read_handle(self, connection, handle, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and attribute
handle. This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_handle(self.stack_connection, connection.connection_handle, handle, timeout=timeout)
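    # Usage sketch (illustrative only, not executed): read a hypothetical handle and
    # inspect the result.
    #
    #   request = manager.gatt_read_handle(connection, 0x0003)
    #   if not request.has_error():
    #       print("value: %r" % request.response.data)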
def gatt_read_handle_async(self, connection, handle, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and attribute
handle. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_handle_async(self.stack_connection, connection.connection_handle, handle,
timeout=timeout)
def gatt_read_multiple_handles(self, connection, handles, timeout=15 * 1000):
"""
Send an ATT Read Multiple request to the peer device associated with the supplied BLEConnection and
a set of attribute handles.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handles: A list of attribute handles for target attributes (0x01 - 0xFFFF)
:type handles: list of int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_multiple_handles(self.stack_connection, connection.connection_handle,
handles, timeout=timeout)
def gatt_read_multiple_handles_async(self, connection, handles, timeout=15 * 1000):
"""
Send an ATT Read Multiple request to the peer device associated with the supplied BLEConnection and
a set of attribute handles.
This is an asynchronous call that will send the request to the peer device and
        return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handles: A list of attribute handles for target attributes (0x01 - 0xFFFF)
:type handles: list of int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_multiple_handles_async(self.stack_connection, connection.connection_handle, handles,
timeout=timeout)
def gatt_read_blob_handle(self, connection, handle, offset, timeout=15 * 1000):
"""
Send an ATT Blob Read request to the peer device associated with the supplied BLEConnection, attribute
handle, and an offset. This is a synchronous call that will wait for either a successful response,
error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param offset: Offset to begin reading attribute value
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_blob_handle(self.stack_connection, connection.connection_handle, handle, offset,
timeout=timeout)
def gatt_read_blob_handle_async(self, connection, handle, offset, timeout=15 * 1000):
"""
Send an ATT Blob Read request to the peer device associated with the supplied BLEConnection, attribute
handle, and an offset. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param offset: Offset to begin reading attribute value
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_blob_handle_async(self.stack_connection, connection.connection_handle, handle,
offset, timeout=timeout)
def gatt_read_uuid(self, connection, uuid, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and GATT UUID.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param uuid: UUID of target GATT entity (16-bit and 128-bit UUIDs are accepted)
:type uuid: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_uuid(self.stack_connection, connection.connection_handle, UUID(uuid),
timeout=timeout)
def gatt_read_uuid_async(self, connection, uuid, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and GATT UUID.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param uuid: UUID of target GATT entity (16-bit and 128-bit UUIDs are accepted)
:type uuid: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_uuid_async(self.stack_connection, connection.connection_handle, UUID(uuid),
timeout=timeout)
def att_send_raw(self, connection, body):
"""
Sends a raw ATT packet using the supplied BLEConnection object
        and data supplied. The function does not apply a standard ATT header to the supplied body, but L2CAP
        and HCI encapsulation are handled.
Note: Valid ATT packets can be constructed using
packets defined in scapy.layers.bluetooth
or using random data for fuzzing.
:param connection: BLEConnection to target device
:param body: ATT request body
:rtype: GATTRequest
"""
request = self.stack_connection.send_raw_att(body, connection.connection_handle)
return request
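    # Usage sketch (illustrative only, not executed): build a raw ATT Read Request
    # with scapy, as suggested in the docstring above. The scapy layer/field names
    # and the handle 0x0003 are assumptions.
    #
    #   from scapy.layers.bluetooth import ATT_Hdr, ATT_Read_Request
    #   body = str(ATT_Hdr() / ATT_Read_Request(gatt_handle=0x0003))
    #   request = manager.att_send_raw(connection, body)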
def l2cap_send_raw(self, connection, body):
"""
Sends a raw L2CAP packet using the supplied BLEConnection object
and data supplied. The function does not apply a standard L2CAP header to the user supplied value,
but HCI encapsulation is applied.
Note: Valid L2CAP packets can be constructed using packets defined in scapy.layers.bluetooth
or using random data for fuzzing.
:param connection: BLEConnection to target device
:param body: L2CAP request body
:rtype: GATTRequest
"""
request = self.stack_connection.send_raw_l2cap(body, connection.connection_handle)
return request
|
indy_node/test/upgrade/test_forced_upgrade_if_request_received_after_propagate.py | Rob-S/indy-node | 627 | 12759228 | from indy_node.server.upgrade_log import UpgradeLog
from indy_node.test import waits
from indy_node.test.upgrade.helper import checkUpgradeScheduled, sdk_ensure_upgrade_sent
from plenum.common.constants import VERSION
from plenum.common.messages.node_messages import Propagate
from plenum.common.request import Request
from plenum.test.delayers import req_delay, ppgDelay
from plenum.test.test_node import getNonPrimaryReplicas
def test_forced_upgrade_handled_once_if_request_received_after_propagate(
looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee,
validUpgradeExpForceTrue):
"""
    Verifies that a POOL_UPGRADE force=true request is handled only once in case
the node commits the transaction to the ledger but during the 3PC-process
receives the request directly from the client after a PROPAGATE from some
other node
"""
slow_node = getNonPrimaryReplicas(nodeSet, instId=0)[-1].node
slow_node.clientIbStasher.delay(req_delay())
slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Beta'))
slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Gamma'))
original_process_propagate = slow_node.nodeMsgRouter.routes[Propagate]
original_process_request = slow_node.clientMsgRouter.routes[Request]
def patched_process_propagate(msg: Propagate, frm: str):
original_process_propagate(msg, frm)
slow_node.clientIbStasher.reset_delays_and_process_delayeds()
slow_node.nodeMsgRouter.routes[Propagate] = original_process_propagate
def patched_process_request(request: Request, frm: str):
original_process_request(request, frm)
slow_node.nodeIbStasher.reset_delays_and_process_delayeds()
slow_node.clientMsgRouter.routes[Request] = original_process_request
slow_node.nodeMsgRouter.routes[Propagate] = patched_process_propagate
slow_node.clientMsgRouter.routes[Request] = patched_process_request
init_len = len(list(slow_node.upgrader._actionLog))
sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee,
validUpgradeExpForceTrue)
looper.runFor(waits.expectedUpgradeScheduled())
checkUpgradeScheduled([slow_node], validUpgradeExpForceTrue[VERSION])
    if init_len == 0:
# first upgrade - should be only one scheduled
assert len(list(slow_node.upgrader._actionLog)) == 1
else:
        # one upgrade was already scheduled; we should cancel it and schedule a new one,
        # so the action log should grow by 2
assert len(list(slow_node.upgrader._actionLog)) == init_len + 2
assert slow_node.upgrader._actionLog.last_event.ev_type == UpgradeLog.Events.scheduled
|
src/python/nimbusml/examples/WordTokenizer.py | montehoover/NimbusML | 134 | 12759240 | ###############################################################################
# WordTokenizer
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.preprocessing.text import WordTokenizer
# data input (as a FileDataStream)
path = get_dataset("wiki_detox_train").as_filepath()
data = FileDataStream.read_csv(path, sep='\t')
print(data.head())
# Sentiment SentimentText
# 0 1 ==RUDE== Dude, you are rude upload that carl p...
# 1 1 == OK! == IM GOING TO VANDALIZE WILD ONES WIK...
# 2 1 Stop trolling, zapatancas, calling me a liar m...
# 3 1 ==You're cool== You seem like a really cool g...
# 4 1 ::::: Why are you threatening me? I'm not bein...
tokenize = WordTokenizer(char_array_term_separators=[" "]) << {'wt': 'SentimentText'}
pipeline = Pipeline([tokenize])
tokenize.fit(data)
y = tokenize.transform(data)
print(y.drop(labels='SentimentText', axis=1).head())
# Sentiment wt.000 wt.001 wt.002 wt.003 wt.004 wt.005 ... wt.366 wt.367 wt.368 wt.369 wt.370 wt.371 wt.372
# 0 1 ==RUDE== Dude, you are rude upload ... None None None None None None None
# 1 1 == OK! == IM GOING TO ... None None None None None None None
# 2 1 Stop trolling, zapatancas, calling me a ... None None None None None None None
# 3 1 ==You're cool== You seem like a ... None None None None None None None
# 4 1 ::::: Why are you threatening me? ... None None None None None None None
|
redbot/webui/captcha.py | gusdleon/redbot | 167 | 12759294 | import hmac
from http import cookies
import json
from typing import Callable, TYPE_CHECKING
from urllib.parse import urlencode
import thor
from thor.http import HttpClient, get_header
from thor.http.error import HttpError
from redbot.resource import HttpResource
from redbot.type import RawHeaderListType
token_client = HttpClient()
token_client.idle_timeout = 30
token_client.connect_timeout = 10
token_client.read_timeout = 10
token_client.max_server_conn = 30
if TYPE_CHECKING:
from redbot.webui import RedWebUi # pylint: disable=cyclic-import,unused-import
class CaptchaHandler:
def __init__(
self,
webui: "RedWebUi",
client_id: str,
continue_test: Callable,
error_response: Callable,
) -> None:
self.webui = webui
self.client_id = client_id
self.continue_test = continue_test
self.error_response = error_response
self.secret = webui.config.get("hcaptcha_secret", "").encode("utf-8")
self.token_lifetime = webui.config.getint("token_lifetime", fallback=300)
def run(self) -> None:
captcha_token = self.webui.body_args.get("captcha_token", [None])[0]
cookie_str = b", ".join(get_header(self.webui.req_headers, b"cookie"))
try:
cookiejar = cookies.SimpleCookie(
cookie_str.decode("utf-8", "replace")
) # type: cookies.SimpleCookie
except cookies.CookieError:
self.error_response(
b"400",
b"Bad Request",
"Sorry, your cookies appear corrupted. Please try again.",
f"Cookie Parse Error: {cookie_str.decode('utf-8', 'replace')}",
)
return
human_time = cookiejar.get("human_time", None)
human_hmac = cookiejar.get("human_hmac", None)
if human_time and human_time.value.isdigit() and human_hmac:
if self.verify_human(int(human_time.value), human_hmac.value):
self.continue_test()
else:
self.error_response(
b"403",
b"Forbidden",
"I need to double-check that you're human; please resubmit.",
"Invalid human token",
)
elif captcha_token:
self.verify_captcha(captcha_token)
else:
self.error_response(
b"403",
b"Forbidden",
"I need to double-check that you're human; please resubmit.",
"Invalid captcha.",
)
def verify_captcha(self, presented_token: str) -> None:
exchange = token_client.exchange()
@thor.events.on(exchange)
def error(err_msg: HttpError) -> None:
self.error_response(
b"403",
b"Forbidden",
"There was a problem with the Captcha server; please try again soon.",
f"Captcha error: {err_msg}.",
)
@thor.events.on(exchange)
def response_start(
status: bytes, phrase: bytes, headers: RawHeaderListType
) -> None:
exchange.tmp_status = status
exchange.tmp_res_body = b""
@thor.events.on(exchange)
def response_body(chunk: bytes) -> None:
exchange.tmp_res_body += chunk
@thor.events.on(exchange)
def response_done(trailers: RawHeaderListType) -> None:
try:
results = json.loads(exchange.tmp_res_body)
except ValueError:
if exchange.tmp_status != b"200":
e_str = f"Captcha server returned {exchange.tmp_status.decode('utf-8')} status code"
else:
e_str = f"Captcha server response error"
self.error_response(
b"500",
b"Internal Server Error",
e_str,
e_str,
)
return
if results["success"]:
self.continue_test(self.issue_human())
else:
e_str = f"Captcha errors: {', '.join(results.get('error-codes', ['unknown error']))}"
self.error_response(
b"403",
b"Forbidden",
e_str,
e_str,
)
request_form = {
"secret": self.secret,
"response": presented_token,
"remoteip": self.client_id,
}
exchange.request_start(
b"POST",
b"https://hcaptcha.com/siteverify",
[[b"content-type", b"application/x-www-form-urlencoded"]],
)
exchange.request_body(urlencode(request_form).encode("utf-8", "replace"))
exchange.request_done({})
def issue_human(self) -> RawHeaderListType:
"""
Return cookie headers for later verification that this is a human.
"""
human_time = str(int(thor.time()) + self.token_lifetime)
human_hmac = hmac.new(
self.secret, bytes(human_time, "ascii"), "sha512"
).hexdigest()
return [
(
b"Set-Cookie",
f"human_time={human_time}; Max-Age={self.token_lifetime}; SameSite=Strict".encode(
"ascii"
),
),
(
b"Set-Cookie",
f"human_hmac={human_hmac}; Max-Age={self.token_lifetime}; SameSite=Strict".encode(
"ascii"
),
),
]
def verify_human(self, human_time: int, human_hmac: str) -> bool:
"""
Check the user's human HMAC.
"""
computed_hmac = hmac.new(self.secret, bytes(str(human_time), "ascii"), "sha512")
        is_valid = hmac.compare_digest(human_hmac, computed_hmac.hexdigest())
if is_valid and human_time >= thor.time():
return True
else:
return False
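# Illustrative sketch (not part of the original module): issue_human() and
# verify_human() implement a signed, timestamped cookie: the expiry time is
# HMAC'd with the shared secret and re-checked on later requests. A minimal
# standalone equivalent, with a hypothetical secret and lifetime:
#
#   import hmac, time
#   secret = b"example-secret"
#   human_time = str(int(time.time()) + 300)
#   human_hmac = hmac.new(secret, human_time.encode("ascii"), "sha512").hexdigest()
#   # later, on a follow-up request:
#   assert hmac.compare_digest(
#       human_hmac,
#       hmac.new(secret, human_time.encode("ascii"), "sha512").hexdigest())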
|
service-workers/service-worker/resources/update-fetch-worker.py | meyerweb/wpt | 14,668 | 12759308 | <reponame>meyerweb/wpt
import random
import time
def main(request, response):
# no-cache itself to ensure the user agent finds a new version for each update.
headers = [(b'Cache-Control', b'no-cache, must-revalidate'),
(b'Pragma', b'no-cache')]
    content_type = b'application/javascript'
headers.append((b'Content-Type', content_type))
extra_body = u"self.onfetch = (event) => { event.respondWith(fetch(event.request)); };"
# Return a different script for each access.
return headers, u'/* %s %s */ %s' % (time.time(), random.random(), extra_body)
|
notebooks/thunderdome/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py | casparschwa/beaconrunner | 2,161 | 12759310 | from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with
)
def run_process_historical_roots_update(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
@with_all_phases
@spec_state_test
def test_historical_root_accumulator(spec, state):
# skip ahead to near the end of the historical roots period (excl block before epoch processing)
state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
history_len = len(state.historical_roots)
yield from run_process_historical_roots_update(spec, state)
assert len(state.historical_roots) == history_len + 1
|
src/python/nimbusml/internal/entrypoints/transforms_columnselector.py | michaelgsharp/NimbusML | 134 | 12759320 | <reponame>michaelgsharp/NimbusML<gh_stars>100-1000
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Transforms.ColumnSelector
"""
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def transforms_columnselector(
data,
output_data=None,
model=None,
keep_columns=None,
drop_columns=None,
keep_hidden=False,
ignore_missing=False,
**params):
"""
**Description**
Selects a set of columns, dropping all others
:param keep_columns: List of columns to keep. (inputs).
:param data: Input dataset (inputs).
:param drop_columns: List of columns to drop. (inputs).
:param keep_hidden: Specifies whether to keep or remove hidden
columns. (inputs).
:param ignore_missing: Specifies whether to ignore columns that
are missing from the input. (inputs).
:param output_data: Transformed dataset (outputs).
:param model: Transform model (outputs).
"""
entrypoint_name = 'Transforms.ColumnSelector'
inputs = {}
outputs = {}
if keep_columns is not None:
inputs['KeepColumns'] = try_set(
obj=keep_columns,
none_acceptable=True,
is_of_type=list,
is_column=True)
if data is not None:
inputs['Data'] = try_set(
obj=data,
none_acceptable=False,
is_of_type=str)
if drop_columns is not None:
inputs['DropColumns'] = try_set(
obj=drop_columns,
none_acceptable=True,
is_of_type=list,
is_column=True)
if keep_hidden is not None:
inputs['KeepHidden'] = try_set(
obj=keep_hidden,
none_acceptable=True,
is_of_type=bool)
if ignore_missing is not None:
inputs['IgnoreMissing'] = try_set(
obj=ignore_missing,
none_acceptable=True,
is_of_type=bool)
if output_data is not None:
outputs['OutputData'] = try_set(
obj=output_data,
none_acceptable=False,
is_of_type=str)
if model is not None:
outputs['Model'] = try_set(
obj=model,
none_acceptable=False,
is_of_type=str)
input_variables = {
x for x in unlist(inputs.values())
if isinstance(x, str) and x.startswith("$")}
output_variables = {
x for x in unlist(outputs.values())
if isinstance(x, str) and x.startswith("$")}
entrypoint = EntryPoint(
name=entrypoint_name, inputs=inputs, outputs=outputs,
input_variables=input_variables,
output_variables=output_variables)
return entrypoint
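# Illustrative usage sketch (variable names and column names are hypothetical,
# not part of the generated file): build the graph node, keeping two columns of
# a dataset referenced through entrypoint variables.
#
#   node = transforms_columnselector(
#       data="$input_data",
#       output_data="$transformed_data",
#       model="$column_selector_model",
#       keep_columns=["Feature1", "Feature2"])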
|
var/spack/repos/builtin/packages/orfm/package.py | kkauder/spack | 2,360 | 12759322 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Orfm(AutotoolsPackage):
"""A simple and not slow open reading frame (ORF) caller. No bells or
whistles like frameshift detection, just a straightforward goal of
returning a FASTA file of open reading frames over a certain length
from a FASTA/Q file of nucleotide sequences."""
homepage = "https://github.com/wwood/OrfM"
url = "https://github.com/wwood/OrfM/releases/download/v0.7.1/orfm-0.7.1.tar.gz"
version('0.7.1', sha256='19f39c72bcc48127b757613c5eef4abae95ee6c82dccf96b041db527b27f319a')
depends_on('zlib', type='link')
|
pypeln/task/api/concat_task_test.py | quarckster/pypeln | 1,281 | 12759335 | <gh_stars>1000+
import sys
import time
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_concat_basic(nums: tp.List[int]):
nums_py = list(map(lambda x: x + 1, nums))
nums_py1 = list(map(lambda x: x ** 2, nums_py))
nums_py2 = list(map(lambda x: -x, nums_py))
nums_py = nums_py1 + nums_py2
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.map(lambda x: x ** 2, nums_pl)
nums_pl2 = pl.task.map(lambda x: -x, nums_pl)
nums_pl = pl.task.concat([nums_pl1, nums_pl2])
assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_concat_basic_2(nums: tp.List[int]):
nums_py = list(map(lambda x: x + 1, nums))
nums_py1 = list(map(lambda x: x ** 2, nums_py))
nums_py2 = list(map(lambda x: -x, nums_py))
nums_py = nums_py1 + nums_py2
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.map(lambda x: x ** 2, nums_pl)
nums_pl2 = pl.task.map(lambda x: -x, nums_pl)
nums_pl = await pl.task.concat([nums_pl1, nums_pl2])
assert sorted(nums_pl) == sorted(nums_py)
# @hp.given(nums=st.lists(st.integers()))
# @hp.settings(max_examples=MAX_EXAMPLES)
def test_concat_multiple(nums: tp.List[int] = [1, 2, 3]):
nums_py = [x + 1 for x in nums]
nums_py1 = nums_py + nums_py
nums_py2 = nums_py1 + nums_py
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.concat([nums_pl, nums_pl])
nums_pl2 = pl.task.concat([nums_pl1, nums_pl])
# assert sorted(nums_py1) == sorted(list(nums_pl1))
assert sorted(nums_py2) == sorted(list(nums_pl2))
@pl.task.utils.run_test_async
async def test_concat_multiple_2(nums: tp.List[int] = [1, 2, 3]):
nums_py = [x + 1 for x in nums]
nums_py1 = nums_py + nums_py
nums_py2 = nums_py1 + nums_py
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.concat([nums_pl, nums_pl])
nums_pl2 = await pl.task.concat([nums_pl1, nums_pl])
# assert sorted(nums_py1) == sorted(list(nums_pl1))
assert sorted(nums_py2) == sorted(list(nums_pl2))
|
tests/import/gen_context.py | learnforpractice/micropython-cpp | 692 | 12759349 | <gh_stars>100-1000
import gen_context2
GLOBAL = "GLOBAL"
def gen():
print(GLOBAL)
yield 1
gen_context2.call(gen())
|
preprocess/snips_preprocess.py | OlegJakushkin/s3prl | 856 | 12759406 | from random import shuffle
import os
from glob import glob
import shutil
import re
import tqdm
from multiprocessing import Pool
from normalise import normalise
months = {'jan.': 'January', 'feb.': 'February', 'mar.': 'March', 'apr.': 'April', 'may': 'May', 'jun.': 'June', 'jul.': 'July', 'aug.': 'August', 'sep.': 'September', 'oct.': 'October', 'nov.': 'November', 'dec.': 'December', 'jan': 'January', 'feb': 'February', 'mar': 'March', 'apr': 'April', 'jun': 'June', 'jul': 'July', 'aug': 'August', 'sep': 'September', 'oct': 'October', 'nov': 'November', 'dec': 'December'}
replace_words = {'&': 'and', '¡':'', 'r&b':'R and B', 'funtime':'fun time', 'español':'espanol', "'s":'s', 'palylist':'playlist'}
replace_vocab = {'ú':'u', 'ñ':'n', 'Ō':'O', 'â':'a'}
reservations = {'chyi':'chyi', 'Pre-Party':'pre party', 'Chu':'Chu', 'B&B':'B and B', '0944':'nine four four', 'Box':'Box', 'ain’t':'am not', 'Zon':'Zon', 'Yui':'Yui', 'neto':'neto', 'skepta':'skepta', '¡Fiesta':'Fiesta', 'Vue':'Vue', 'iheart':'iheart', 'disco':'disco'}
same = "klose la mejor música para tus fiestas dubstep dangles drejer listas".split(' ')
for word in same:
reservations[word] = word
def word_normalise(words):
ret = []
for word in words:
if word.lower() in months:
word = months[word.lower()]
if word.lower() in replace_words:
word = replace_words[word.lower()]
for regex in replace_vocab:
            word = re.sub(regex, replace_vocab[regex], word)
#word = re.sub(r'(\S)([\.\,\!\?])', r'\1 \2', word)
word = re.sub(r'[\.\,\!\?;\/]', '', word)
ret.append(word)
return ret
def sent_normalise(text, slots_split=None):
norm_slots, norm_texts = [], []
text_split = text.split(' ')
if slots_split is None:
slots_split = ['O']*len(text_split)
for idx in range(len(text_split)):
if text_split[idx] in '.,!?;/]':
continue
if text_split[idx] in reservations:
for word in reservations[text_split[idx]].split(' '):
norm_texts.append(word)
norm_slots.append(slots_split[idx])
continue
norm_text = normalise(word_normalise([text_split[idx]]), variety="AmE", verbose=False)
for phrase in norm_text:
if phrase == '':
continue
for word in re.split(r' |\-', phrase):
word = re.sub(r'[\.\,\!\?;\/]', '', word)
if word == '':
continue
norm_texts.append(word)
norm_slots.append(slots_split[idx])
return norm_slots, norm_texts
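# Illustrative sketch (tokens and slot labels are hypothetical): sent_normalise
# expands every raw token through `normalise` and repeats that token's slot label
# for each word it expands into, so the two returned lists stay aligned, e.g.
#
#   slots, words = sent_normalise('play jazz', ['O', 'B-genre'])
#   # expected to give words == ['play', 'jazz'] and slots == ['O', 'B-genre']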
def process_raw_snips_file(file, out_f):
with open(file) as f:
content = f.readlines()
content = [x.strip() for x in content]
with open(out_f, 'w') as f:
for cnt, line in enumerate(content):
text = line.split(' <=> ')[0]
intent = line.split(' <=> ')[1]
#[r.split(':')[0] if len(r.split(':')) == 2 else ' ' for r in x.split()]
text_split = [x.replace('::', ':').split(':')[0] if len(x.replace('::', ':').split(':')) == 2 else ' ' for x in text.split()]
text_entities = ' '.join(text_split)
slots_split = [x.replace('::', ':').split(':')[1] for x in text.split()]
slots_entities = ' '.join(slots_split)
assert len(text_split) == len(slots_split), (text_split, slots_split)
f.write('%d | BOS %s EOS | O %s | %s\n' % (cnt, text_entities, slots_entities, intent))
def remove_IBO_from_snipt_vocab_slot(in_f, out_f):
with open(in_f) as f:
content = f.readlines()
content = [x.strip() for x in content]
# get rid of BIO tag from the slots
for idx, line in enumerate(content):
if line != 'O':
content[idx] = line[len('B-'):]
content = set(content) # remove repeating slots
with open(out_f, 'w') as f:
for line in content:
f.write('%s\n' % line)
def process_daniel_snips_file(content):
content = [x.strip() for x in content]
utt_ids = [x.split('\t', 1)[0] for x in content]
valid_uttids = [x for x in utt_ids if x.split('-')[1] == 'valid']
test_uttids = [x for x in utt_ids if x.split('-')[1] == 'test']
train_uttids = [x for x in utt_ids if x.split('-')[1] == 'train']
utt2text, utt2slots, utt2intent = {}, {}, {}
assert len(utt_ids) == len(set(utt_ids))
# create utt2text, utt2slots, utt2intent
for line in content:
uttid, text, slots, intent = line.split('\t')
if len(text.split()) != len(slots.split()): # detect 'empty' in text
assert len(text.split(' ')) == 2
empty_idx = text.split().index(text.split(' ')[0].split()[-1]) + 1
slots_list = slots.split()
del slots_list[empty_idx]
cleaned_slots = ' '.join(slots_list)
assert len(text.split()) == len(slots_list)
cleaned_text = ' '.join(text.split())
#print(cleaned_text, cleaned_slots)
else:
(cleaned_text, cleaned_slots) = (text, slots)
# get rid of the 'intent/' from all slot values
cleaned_slots = ' '.join([x.split('/')[1] if x != 'O' else x for x in cleaned_slots.split()])
# strip the whitespaces before punctuations
#cleaned_text = re.sub(r'\s([?.!,"](?:\s|$))', r'\1', cleaned_text)
utt2text[uttid] = cleaned_text
utt2slots[uttid] = cleaned_slots
utt2intent[uttid] = intent
test_utt2text, test_utt2slots, test_utt2intent = {}, {}, {}
valid_utt2text, valid_utt2slots, valid_utt2intent = {}, {}, {}
train_utt2text, train_utt2slots, train_utt2intent = {}, {}, {}
for utt in valid_uttids:
valid_utt2text[utt] = utt2text[utt]
valid_utt2slots[utt] = utt2slots[utt]
valid_utt2intent[utt] = utt2intent[utt]
for utt in test_uttids:
test_utt2text[utt] = utt2text[utt]
test_utt2slots[utt] = utt2slots[utt]
test_utt2intent[utt] = utt2intent[utt]
for utt in train_uttids:
train_utt2text[utt] = utt2text[utt]
train_utt2slots[utt] = utt2slots[utt]
train_utt2intent[utt] = utt2intent[utt]
assert len(set(valid_utt2intent.values())) == len(set(test_utt2intent.values())) == len(set(train_utt2intent.values())) == 7
assert len(valid_utt2intent.keys()) == len(test_utt2intent.keys()) == 700
assert len(train_utt2intent.keys()) == 13084
def __return_set_of_slots(utt2slots):
all_slots = []
for slot in utt2slots.values():
all_slots.extend(slot.split())
unique_slots = set(all_slots)
return unique_slots
assert len(__return_set_of_slots(valid_utt2slots)) == len(__return_set_of_slots(test_utt2slots)) == \
len(__return_set_of_slots(train_utt2slots)) == 40
return (train_utt2text, train_utt2slots, train_utt2intent), \
(valid_utt2text, valid_utt2slots, valid_utt2intent), \
(test_utt2text, test_utt2slots, test_utt2intent)
def map_and_link_snips_audio(snips_audio_dir, link_dir):
# traverse through snips_audio_dir
result = [y for x in os.walk(snips_audio_dir) for y in glob(os.path.join(x[0], '*.mp3'))]
for path in result:
person = path.split('/')[8].split('_')[1]
filename = path.split('/')[-1]
if filename[:5] != 'snips':
continue
uttid = filename.split('.')[0]
new_uttid = person + '-' + filename
partition = uttid.split('-')[1]
destination = os.path.join(link_dir, partition, new_uttid)
shutil.copyfile(path, destination)
def create_multispk_for_snips(output_dir):
speakers = "<NAME> <NAME>".split(' ')
dataset_info = [{'split':'test', 'num_utts':700}, {'split':'valid', 'num_utts':700}, {'split':'train', 'num_utts':13084}]
test_out_f = open(os.path.join(output_dir, 'all.iob.snips.txt'), 'w')
for data in dataset_info:
num_utts = data['num_utts']
split = data['split']
with open(os.path.join(output_dir, 'single-matched-snips.%s.w-intent'%split)) as f:
content = f.readlines()
utt2line = {x.strip().split()[0]:x.strip() for x in content}
for spk in speakers:
for num in range(num_utts):
uttid = "%s-snips-%s-%d"%(spk, split, num) #mp3.split('/')[-1].split('.')[0]
line = utt2line["snips-%s-%d"%(split, num)] #'-'.join(uttid.split('-')[1:])]
text = line.split('\t')[1].upper()
slots = line.split('\t')[2]
intent = line.split('\t')[3]
test_out_f.write('%s BOS %s EOS\tO %s %s\n' % (uttid, text, slots, intent))
test_out_f.close()
def apply_text_norm_and_modify_slots(all_tsv, output_dir):
train_dirs, valid_dirs, test_dirs = process_daniel_snips_file(all_tsv)
# test
test_file = open(os.path.join(output_dir, 'single-matched-snips.test.w-intent'), 'w')
vocab_slot = {}
for uttid in tqdm.tqdm(test_dirs[0].keys(), desc='Text Normalising on testing set'):
text = test_dirs[0][uttid]
slots = test_dirs[1][uttid]
intent = test_dirs[2][uttid]
slots_split = slots.split()
for s in slots_split:
vocab_slot.setdefault(s, 0)
vocab_slot[s] += 1
norm_slots, norm_texts = sent_normalise(text, slots_split)
assert len(norm_texts) == len(norm_slots), (norm_texts, norm_slots)
# write to file
test_file.write('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent))
test_file.close()
# valid
valid_file = open(os.path.join(output_dir, 'single-matched-snips.valid.w-intent'), 'w')
for uttid in tqdm.tqdm(valid_dirs[0].keys(), desc='Text Normalising on validation set'):
text = valid_dirs[0][uttid]
slots = valid_dirs[1][uttid]
intent = valid_dirs[2][uttid]
slots_split = slots.split()
for s in slots_split:
vocab_slot.setdefault(s, 0)
vocab_slot[s] += 1
norm_slots, norm_texts = sent_normalise(text, slots_split)
assert len(norm_texts) == len(norm_slots), (norm_texts, norm_slots)
# write to file
valid_file.write('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent))
valid_file.close()
# train
train_file = open(os.path.join(output_dir, 'single-matched-snips.train.w-intent'), 'w')
for uttid in tqdm.tqdm(train_dirs[0].keys(), desc='Text Normalising on training set'):
text = train_dirs[0][uttid]
slots = train_dirs[1][uttid]
intent = train_dirs[2][uttid]
slots_split = slots.split()
for s in slots_split:
vocab_slot.setdefault(s, 0)
vocab_slot[s] += 1
norm_slots, norm_texts = sent_normalise(text, slots_split)
assert len(norm_texts) == len(norm_slots), (norm_texts, norm_slots)
# write to file
train_file.write('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent))
train_file.close()
vocab_file = open(os.path.join(output_dir, 'slots.txt'), 'w')
vocab_file.write('\n'.join(sorted(list(vocab_slot.keys()), key=lambda x:vocab_slot[x], reverse=True)))
def sox_func(inputs):
files, root, out_root, speaker = inputs
for name in tqdm.tqdm(files, desc='Process for speaker: '+speaker):
if name.endswith(".mp3"):
split = name.split('-')[1]
out_dir = os.path.join(out_root, split)
os.makedirs(out_dir, exist_ok=True)
orig_file = os.path.join(root, name)
new_file = os.path.join(out_dir, speaker+'-'+name.split('/')[-1].split('.')[0] + '.wav')
bashCommand = "sox " + orig_file + " -t wav -c 1 -r 16000 -b 16 -e signed-integer " + new_file
r = os.popen(bashCommand).read()
def sox_mp3_to_wav(in_root, out_root):
os.makedirs(out_root, exist_ok=True)
pool = Pool(16)
inputs = []
for root, dirs, files in os.walk(in_root):
print('[Processing] enter directory %s'%root)
if not len(files):
continue
speaker = root.split('/')[-2].split('_')[1]
print('[Processing] process %d audio files from speaker %s'%(len(files), speaker))
inputs.append((files, root, out_root, speaker))
pool.map(sox_func, inputs)
if __name__ == '__main__':
import sys, os
mode = sys.argv[1]
if mode == 'text':
repo_dir = sys.argv[2]
dump_dir = sys.argv[3]
os.makedirs(dump_dir, exist_ok=True)
content = []
content += open(os.path.join(repo_dir, 'data/nlu_annotation/valid')).readlines()[1:]
content += open(os.path.join(repo_dir, 'data/nlu_annotation/test')).readlines()[1:]
content += open(os.path.join(repo_dir, 'data/nlu_annotation/train')).readlines()[1:]
apply_text_norm_and_modify_slots(content, dump_dir)
create_multispk_for_snips(dump_dir)
elif mode == 'audio':
audio_dir = sys.argv[2]
dump_dir = sys.argv[3]
# Step: sox the snips *.mp3 to the correct format
sox_mp3_to_wav(audio_dir, dump_dir)
else:
print('Usage: python preprocess.py [text|audio] [data_path] [dump_path]')
|
intake/cli/client/subcommands/drivers.py | mattkram/intake | 149 | 12759416 | <reponame>mattkram/intake
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
CLI for listing, enabling, disabling intake drivers
"""
from intake import __version__
from intake.cli.util import Subcommand
from intake.source.discovery import drivers
import logging
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# API
# -----------------------------------------------------------------------------
class Drivers(Subcommand):
"""
List, enable, and disable intake drivers.
"""
name = "drivers"
def initialize(self):
sub_parser = self.parser.add_subparsers()
list = sub_parser.add_parser(
'list',
help='Show all intake drivers, whether enabled, disabled, '
'or directly inserted into the registry'
)
list.add_argument(
'-v', '--verbose', action='store_true', help='Show module path.')
list.set_defaults(invoke=self._list)
enable = sub_parser.add_parser('enable', help='Enable an intake driver.')
enable.add_argument('name', type=str, help='Driver name')
enable.add_argument('driver', type=str, default=None, nargs='?',
help='Module path and class name, as in '
'package.submodule.ClassName')
enable.set_defaults(invoke=self._enable)
disable = sub_parser.add_parser(
'disable', help='Disable one or more intake drivers.')
disable.add_argument('names', type=str, help='Driver names', nargs='+')
disable.set_defaults(invoke=self._disable)
def invoke(self, args):
self.parser.print_help()
def _list(self, args):
if drivers.do_scan:
print("Package scan:")
for k, v in drivers.scanned.items():
print(f'{k:<30}{v.__module__}.{v.__name__}')
print()
print("Entrypoints:")
eps = [ep for ep in drivers.from_entrypoints()
if ep.name not in drivers.disabled()]
if eps:
for v in eps:
print(f'{v.name:<30}{v.module_name}:{v.object_name}')
else:
print("<none>")
print()
print("From Config:")
eps = [ep for ep in drivers.from_conf()
if ep.name not in drivers.disabled()]
if eps:
for v in eps:
if v.name not in drivers.disabled():
print(f'{v.name:<30}{v.module_name}:{v.object_name}')
else:
print("<none>")
print()
print("Disabled: ", drivers.disabled() or "<none>")
def _enable(self, args):
drivers.enable(args.name, args.driver)
def _disable(self, args):
for name in args.names:
drivers.disable(name)
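# Illustrative CLI sketch (the driver name and class path are examples and may not
# be installed): the subcommands defined above are invoked as
#
#   intake drivers list -v
#   intake drivers enable csv intake.source.csv.CSVSource
#   intake drivers disable csv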
|
crestify/archivers/__init__.py | punto1/crestify | 214 | 12759438 | <reponame>punto1/crestify<gh_stars>100-1000
from archive_service import ArchiveService, ArchiveException
from archiveorg import ArchiveOrgService
from archivetoday import ArchiveTodayService
|
omaha_server/crash/tests/test_serializers.py | makar21/omaha-server | 142 | 12759459 | <reponame>makar21/omaha-server
# coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
from builtins import str
import os
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from omaha.tests.utils import temporary_media_root
from crash.models import Symbols, Crash
from crash.serializers import SymbolsSerializer, CrashSerializer
BASE_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(BASE_DIR, 'testdata')
SYM_FILE = os.path.join(TEST_DATA_DIR, 'BreakpadTestApp.sym')
class SymbolsSerializerTest(TestCase):
def test_serializer(self):
data = dict(file=SimpleUploadedFile('./test.pdb', False),
debug_id='C1C0FA629EAA4B4D9DD2ADE270A231CC1',
debug_file='BreakpadTestApp.pdb')
symbols = Symbols.objects.create(**data)
self.assertDictEqual(SymbolsSerializer(symbols).data,
dict(id=symbols.id,
debug_id='C1C0FA629EAA4B4D9DD2ADE270A231CC1',
debug_file='BreakpadTestApp.pdb',
file=symbols.file.url,
file_size=symbols.file_size,
created=symbols.created.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
modified=symbols.modified.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), ))
@temporary_media_root(MEDIA_URL='http://cache.pack.google.com/edgedl/chrome/install/782.112/')
def test_auto_fill_file_size(self):
with open(SYM_FILE, 'rb') as f:
data = dict(file=SimpleUploadedFile('./BreakpadTestApp.sym', f.read()))
symbols = SymbolsSerializer(data=data)
self.assertTrue(symbols.is_valid())
symbols_instance = symbols.save()
self.assertEqual(symbols_instance.debug_id, 'C1C0FA629EAA4B4D9DD2ADE270A231CC1')
self.assertEqual(symbols_instance.debug_file, 'BreakpadTestApp.pdb')
self.assertEqual(symbols_instance.file_size, 68149)
class CrashSerializerTest(TestCase):
maxDiff = None
@temporary_media_root(
CELERY_ALWAYS_EAGER=False,
CELERY_EAGER_PROPAGATES_EXCEPTIONS=False,
)
def test_serializer(self):
meta = dict(
lang='en',
version='1.0.0.1',
)
stacktrace_json = dict(
crashing_thread={},
)
app_id = '{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}'
user_id = '{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}'
crash = Crash.objects.create(
appid=app_id,
userid=user_id,
upload_file_minidump=SimpleUploadedFile('./dump.dat', b''),
meta=meta,
stacktrace_json=stacktrace_json
)
self.assertDictEqual(CrashSerializer(crash).data,
dict(id=crash.id,
upload_file_minidump=crash.upload_file_minidump.url,
archive=None,
appid=str(crash.appid),
userid=str(crash.userid),
meta=meta,
signature=crash.signature,
stacktrace_json=crash.stacktrace_json,
created=crash.created.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
modified=crash.modified.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
os=None,
build_number=None,
channel=''))
|
Hackerearth/The_minionGame.py | Shaswat-2203/HacktoberfestForBeginners | 115 | 12759462 |
def minion_game(string):
length = len(string)
the_vowel = "AEIOU"
kevin = 0
stuart = 0
for i in range(length):
if string[i] in the_vowel:
kevin = kevin + length - i
else:
stuart = stuart + length - i
if kevin > stuart:
print ("Kevin %d" % kevin)
elif kevin < stuart:
print ("Stuart %d" % stuart)
else:
print ("Draw")
|
Chapter03/Arista/eapi_1.py | stavsta/Mastering-Python-Networking-Second-Edition | 107 | 12759476 | <filename>Chapter03/Arista/eapi_1.py
#!/usr/bin/python2
from __future__ import print_function
from jsonrpclib import Server
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
switch = Server("https://admin:[email protected]/command-api")
response = switch.runCmds( 1, [ "show version" ] )
print('Serial Number: ' + response[0]['serialNumber'])
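# A possible follow-up sketch (the command and response key are assumed from Arista
# eAPI conventions; verify against your EOS version):
# response = switch.runCmds(1, ["show hostname"])
# print('Hostname: ' + response[0]['hostname'])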
|
nn/clipping.py | awesome-archive/sentence-space | 211 | 12759489 | <gh_stars>100-1000
import theano.tensor as T
class MaxNorm(object):
def __init__(self, max_norm=5):
self.max_norm = max_norm
def __call__(self, grads):
norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
return [self.clip_norm(g, self.max_norm, norm) for g in grads]
def clip_norm(self, g, c, n):
if c > 0:
g = T.switch(T.ge(n, c), g * c / n, g)
return g
class Clip(object):
def __init__(self, clip=5):
self.clip = clip
def __call__(self, grads):
return [T.clip(g, -self.clip, self.clip) for g in grads]
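# Illustrative usage sketch (cost and params are hypothetical symbolic variables):
# clip gradients before handing them to an optimizer update.
#
#   grads = T.grad(cost, params)
#   grads = MaxNorm(max_norm=5)(grads)  # rescale when the global L2 norm exceeds 5
#   grads = Clip(clip=5)(grads)         # element-wise clipping as an alternative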
|
onegram/exceptions.py | pauloromeira/onegram | 150 | 12759491 | <filename>onegram/exceptions.py
class OnegramException(Exception):
pass
# TODO [romeira]: Login exceptions {06/03/18 23:07}
class AuthException(OnegramException):
pass
class AuthFailed(AuthException):
pass
class AuthUserError(AuthException):
pass
class NotSupportedError(OnegramException):
pass
class RequestFailed(OnegramException):
pass
class RateLimitedError(RequestFailed):
pass
# TODO [romeira]: Query/action exceptions {06/03/18 23:08}
# TODO [romeira]: Session expired exception {06/03/18 23:08}
# TODO [romeira]: Private user exception/warning {06/03/18 23:09}
# TODO [romeira]: Not found exception {06/03/18 23:12}
# TODO [romeira]: Already following/liked/commented? warnings {06/03/18 23:12}
# TODO [romeira]: Timeout exception {06/03/18 23:12}
|
unicode/servers/test_ucd.py | fluentpython/concurrency | 102 | 12759500 | import itertools
import ucd
ABC_LINES = '''
0040;COMMERCIAL AT;Po;0;ON;;;;;N;;;;;
0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
0042;LATIN CAPITAL LETTER B;Lu;0;L;;;;;N;;;;0062;
0043;LATIN CAPITAL LETTER C;Lu;0;L;;;;;N;;;;0063;
'''.strip()
def test_parse_line():
line_A = '0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;'
code, name, old_name, words = ucd.parse_line(line_A)
assert code == 65
assert name == 'LATIN CAPITAL LETTER A'
assert old_name == ''
assert words == ['A', 'CAPITAL', 'LATIN', 'LETTER']
def test_parse_line_with_hyphen_and_field_10():
cases = [
('002D;HYPHEN-MINUS;Pd;0;ES;;;;;N;;;;;',
45, 'HYPHEN-MINUS', '', ['HYPHEN', 'MINUS']),
('005F;LOW LINE;Pc;0;ON;;;;;N;SPACING UNDERSCORE;;;;',
95, 'LOW LINE', 'SPACING UNDERSCORE',
['LINE', 'LOW', 'SPACING', 'UNDERSCORE']),
('0027;APOSTROPHE;Po;0;ON;;;;;N;APOSTROPHE-QUOTE;;;',
39, 'APOSTROPHE', 'APOSTROPHE-QUOTE', ['APOSTROPHE', 'QUOTE']),
]
for line, *fields_ok in cases:
fields = ucd.parse_line(line)
assert fields == tuple(fields_ok)
def test_parser_top_3():
records = list(itertools.islice(ucd.parser(), 3))
assert records == [
(32, 'SPACE', '', ['SPACE']),
(33, 'EXCLAMATION MARK', '', ['EXCLAMATION', 'MARK']),
(34, 'QUOTATION MARK', '', ['MARK', 'QUOTATION']),
]
def test_index():
line = '003E;GREATER-THAN SIGN;Sm;0;ON;;;;;Y;;;;;'
record = ucd.parse_line(line)
idx = ucd.index([record])
assert idx == {'GREATER': [62], 'SIGN': [62], 'THAN': [62]}
def test_index_abc():
records = [ucd.parse_line(line) for line in ABC_LINES.split('\n')]
idx = ucd.index(records)
assert idx == {
'A': [65],
'AT': [64],
'B': [66],
'C': [67],
'CAPITAL': [65, 66, 67],
'COMMERCIAL': [64],
'LATIN': [65, 66, 67],
'LETTER': [65, 66, 67],
}
|
clickhouse_driver/dbapi/cursor.py | 1024inc/clickhouse-driver | 823 | 12759504 | from collections import namedtuple
from itertools import islice
from ..errors import Error as DriverError
from .errors import InterfaceError, OperationalError, ProgrammingError
Column = namedtuple(
'Column',
'name type_code display_size internal_size precision scale null_ok'
)
class Cursor(object):
class States(object):
(
NONE,
RUNNING,
FINISHED,
CURSOR_CLOSED
) = range(4)
_states = States()
def __init__(self, client, connection):
self._client = client
self._connection = connection
self._reset_state()
self.arraysize = 1
# Begin non-PEP attributes
self._columns_with_types = None
# End non-PEP attributes
super(Cursor, self).__init__()
def __repr__(self):
is_closed = self._state == self._states.CURSOR_CLOSED
return '<cursor object at 0x{0:x}; closed: {1:}>'.format(
id(self), is_closed
)
# Iteration support.
def __iter__(self):
while True:
one = self.fetchone()
if one is None:
return
yield one
# Context manager integrations.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def description(self):
if self._state == self._states.NONE:
return None
columns = self._columns or []
types = self._types or []
return [
Column(name, type_code, None, None, None, None, True)
for name, type_code in zip(columns, types)
]
@property
def rowcount(self):
"""
:return: the number of rows that the last .execute*() produced.
"""
return self._rowcount
def close(self):
"""
Close the cursor now. The cursor will be unusable from this point
forward; an :data:`~clickhouse_driver.dbapi.Error` (or subclass)
exception will be raised if any operation is attempted with the
cursor.
"""
self._client.disconnect()
self._state = self._states.CURSOR_CLOSED
try:
# cursor can be already closed
self._connection.cursors.remove(self)
except ValueError:
pass
def execute(self, operation, parameters=None):
"""
Prepare and execute a database operation (query or command).
:param operation: query or command to execute.
:param parameters: sequence or mapping that will be bound to
variables in the operation.
:return: None
"""
self._check_cursor_closed()
self._begin_query()
try:
execute, execute_kwargs = self._prepare()
response = execute(
operation, params=parameters, with_column_types=True,
**execute_kwargs
)
except DriverError as orig:
raise OperationalError(orig)
self._process_response(response)
self._end_query()
def executemany(self, operation, seq_of_parameters):
"""
Prepare a database operation (query or command) and then execute it
against all parameter sequences found in the sequence
`seq_of_parameters`.
:param operation: query or command to execute.
:param seq_of_parameters: sequences or mappings for execution.
:return: None
"""
self._check_cursor_closed()
self._begin_query()
try:
execute, execute_kwargs = self._prepare()
response = execute(
operation, params=seq_of_parameters, **execute_kwargs
)
except DriverError as orig:
raise OperationalError(orig)
self._process_response(response, executemany=True)
self._end_query()
def fetchone(self):
"""
Fetch the next row of a query result set, returning a single sequence,
or None when no more data is available.
:return: the next row of a query result set or None.
"""
self._check_query_started()
if self._stream_results:
return next(self._rows, None)
else:
if not self._rows:
return None
return self._rows.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples). An empty sequence is returned when
no more rows are available.
:param size: amount of rows to return.
:return: list of fetched rows or empty list.
"""
self._check_query_started()
if size is None:
size = self.arraysize
if self._stream_results:
if size == -1:
return list(self._rows)
else:
return list(islice(self._rows, size))
if size < 0:
rv = self._rows
self._rows = []
else:
rv = self._rows[:size]
self._rows = self._rows[size:]
return rv
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples).
:return: list of fetched rows.
"""
self._check_query_started()
if self._stream_results:
return list(self._rows)
rv = self._rows
self._rows = []
return rv
def setinputsizes(self, sizes):
# Do nothing.
pass
def setoutputsize(self, size, column=None):
# Do nothing.
pass
# Begin non-PEP methods
@property
def columns_with_types(self):
"""
:return: list of column names with corresponding types of the last
.execute*(). E.g. [('x', 'UInt64')].
"""
return self._columns_with_types
def set_stream_results(self, stream_results, max_row_buffer):
"""
        Toggles results streaming from the server. The driver will consume
        blocks of `max_row_buffer` rows and yield row-by-row from each
        block.
:param stream_results: enable or disable results streaming.
:param max_row_buffer: specifies the maximum number of rows to buffer
at a time.
:return: None
"""
self._stream_results = stream_results
self._max_row_buffer = max_row_buffer
def set_settings(self, settings):
"""
Specifies settings for cursor.
:param settings: dictionary of query settings
:return: None
"""
self._settings = settings
def set_types_check(self, types_check):
"""
Toggles type checking for sequence of INSERT parameters.
Disabled by default.
:param types_check: new types check value.
:return: None
"""
self._types_check = types_check
def set_external_table(self, name, structure, data):
"""
Adds external table to cursor context.
If the same table is specified more than once the last one is used.
:param name: name of external table
:param structure: list of tuples (name, type) that defines table
structure. Example [(x, 'Int32')].
:param data: sequence of rows of tuples or dicts for transmission.
:return: None
"""
self._external_tables[name] = (structure, data)
def set_query_id(self, query_id):
"""
Specifies the query identifier for cursor.
:param query_id: the query identifier.
:return: None
"""
self._query_id = query_id
# End non-PEP methods
# Private methods.
def _prepare(self):
external_tables = [
{'name': name, 'structure': structure, 'data': data}
for name, (structure, data) in self._external_tables.items()
] or None
execute = self._client.execute
if self._stream_results:
execute = self._client.execute_iter
self._settings = self._settings or {}
self._settings['max_block_size'] = self._max_row_buffer
execute_kwargs = {
'settings': self._settings,
'external_tables': external_tables,
'types_check': self._types_check,
'query_id': self._query_id
}
return execute, execute_kwargs
def _process_response(self, response, executemany=False):
if executemany:
self._rowcount = response
response = None
if not response or isinstance(response, int):
self._columns = self._types = self._rows = []
if isinstance(response, int):
self._rowcount = response
return
if self._stream_results:
columns_with_types = next(response)
rows = response
else:
rows, columns_with_types = response
self._columns_with_types = columns_with_types
# Only SELECT queries have columns_with_types.
# DDL and INSERT INTO ... SELECT queries have empty columns header.
# We need to obtain rows count only during non-streaming SELECTs.
if columns_with_types:
self._columns, self._types = zip(*columns_with_types)
if not self._stream_results:
self._rowcount = len(rows)
else:
self._columns = self._types = []
self._rows = rows
def _reset_state(self):
"""
Resets query state and get ready for another query.
"""
self._state = self._states.NONE
self._columns = None
self._types = None
self._rows = None
self._rowcount = -1
self._stream_results = False
self._max_row_buffer = 0
self._settings = None
self._query_id = None
self._external_tables = {}
self._types_check = False
def _begin_query(self):
self._state = self._states.RUNNING
def _end_query(self):
self._state = self._states.FINISHED
def _check_cursor_closed(self):
if self._state == self._states.CURSOR_CLOSED:
raise InterfaceError('cursor already closed')
def _check_query_started(self):
if self._state == self._states.NONE:
raise ProgrammingError('no results to fetch')
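# Illustrative usage sketch (the connection setup and query are hypothetical);
# a Cursor is normally obtained from a DB API Connection rather than constructed
# directly:
#
#   with connection.cursor() as cursor:
#       cursor.execute('SELECT number FROM system.numbers LIMIT %(limit)s',
#                      {'limit': 3})
#       rows = cursor.fetchall()  # e.g. [(0,), (1,), (2,)]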
|
pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py | AlbertDeFusco/vscode-python | 2,461 | 12759570 | <reponame>AlbertDeFusco/vscode-python
"""
Examples:
>>> square(1)
1
>>> square(2)
4
>>> square(3)
9
>>> spam = Spam()
>>> spam.eggs()
42
"""
def square(x):
"""
Examples:
>>> square(1)
1
>>> square(2)
4
>>> square(3)
9
"""
return x * x
class Spam(object):
"""
Examples:
>>> spam = Spam()
>>> spam.eggs()
42
"""
def eggs(self):
"""
Examples:
>>> spam = Spam()
>>> spam.eggs()
42
"""
return 42
|
python/ray/tune/integration/docker.py | mkucijan/ray | 21,382 | 12759574 | <filename>python/ray/tune/integration/docker.py<gh_stars>1000+
import logging
import os
from typing import Optional, Tuple, List
from ray.autoscaler.sdk import rsync, configure_logging
from ray.util import get_node_ip_address
from ray.util.debug import log_once
from ray.tune.syncer import NodeSyncer
from ray.tune.sync_client import SyncClient
from ray.ray_constants import env_integer
logger = logging.getLogger(__name__)
class DockerSyncer(NodeSyncer):
"""DockerSyncer used for synchronization between Docker containers.
This syncer extends the node syncer, but is usually instantiated
without a custom sync client. The sync client defaults to
``DockerSyncClient`` instead.
Set the env var `TUNE_SYNCER_VERBOSITY` to increase verbosity
of syncing operations (0, 1, 2, 3). Defaults to 0.
.. note::
This syncer only works with the Ray cluster launcher.
If you use your own Docker setup, make sure the nodes can connect
to each other via SSH, and try the regular SSH-based syncer instead.
Example:
.. code-block:: python
from ray.tune.integration.docker import DockerSyncer
tune.run(train,
sync_config=tune.SyncConfig(
sync_to_driver=DockerSyncer))
"""
_cluster_config_file = os.path.expanduser("~/ray_bootstrap_config.yaml")
def __init__(self,
local_dir: str,
remote_dir: str,
sync_client: Optional[SyncClient] = None):
configure_logging(
log_style="record",
verbosity=env_integer("TUNE_SYNCER_VERBOSITY", 0))
self.local_ip = get_node_ip_address()
self.worker_ip = None
sync_client = sync_client or DockerSyncClient()
sync_client.configure(self._cluster_config_file)
super(NodeSyncer, self).__init__(local_dir, remote_dir, sync_client)
def set_worker_ip(self, worker_ip: str):
self.worker_ip = worker_ip
@property
def _remote_path(self) -> Tuple[str, str]:
return (self.worker_ip, self._remote_dir)
class DockerSyncClient(SyncClient):
"""DockerSyncClient to be used by DockerSyncer.
This client takes care of executing the synchronization
commands for Docker nodes. In its ``sync_down`` and
``sync_up`` commands, it expects tuples for the source
and target, respectively, for compatibility with docker.
Args:
should_bootstrap: Whether to bootstrap the autoscaler
            configuration. This may be useful when you are
running into authentication problems; i.e.:
https://github.com/ray-project/ray/issues/17756.
"""
def __init__(self, should_bootstrap: bool = True):
self._command_runners = {}
self._cluster_config = None
if os.environ.get("TUNE_SYNC_DISABLE_BOOTSTRAP") == "1":
should_bootstrap = False
logger.debug("Skipping bootstrap for docker sync client.")
self._should_bootstrap = should_bootstrap
def configure(self, cluster_config_file: str):
self._cluster_config_file = cluster_config_file
def sync_up(self,
source: str,
target: Tuple[str, str],
exclude: Optional[List] = None) -> bool:
"""Here target is a tuple (target_node, target_dir)"""
target_node, target_dir = target
# Add trailing slashes for rsync
source = os.path.join(source, "")
target_dir = os.path.join(target_dir, "")
import click
try:
rsync(
cluster_config=self._cluster_config_file,
source=source,
target=target_dir,
down=False,
ip_address=target_node,
should_bootstrap=self._should_bootstrap,
use_internal_ip=True)
except click.ClickException:
if log_once("docker_rsync_up_fail"):
logger.warning(
"Rsync-up failed. Consider using a durable trainable "
"or setting the `TUNE_SYNC_DISABLE_BOOTSTRAP=1` env var.")
raise
return True
def sync_down(self,
source: Tuple[str, str],
target: str,
exclude: Optional[List] = None) -> bool:
"""Here source is a tuple (source_node, source_dir)"""
source_node, source_dir = source
# Add trailing slashes for rsync
source_dir = os.path.join(source_dir, "")
target = os.path.join(target, "")
import click
try:
rsync(
cluster_config=self._cluster_config_file,
source=source_dir,
target=target,
down=True,
ip_address=source_node,
should_bootstrap=self._should_bootstrap,
use_internal_ip=True)
except click.ClickException:
if log_once("docker_rsync_down_fail"):
logger.warning(
"Rsync-down failed. Consider using a durable trainable "
"or setting the `TUNE_SYNC_DISABLE_BOOTSTRAP=1` env var.")
raise
return True
def delete(self, target: str) -> bool:
raise NotImplementedError
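# Illustrative sketch (cluster config path, worker IP and directories are
# hypothetical): syncing a trial directory up to a worker container delegates to
# rsync over the autoscaler configuration.
#
#   client = DockerSyncClient()
#   client.configure("~/ray_bootstrap_config.yaml")
#   client.sync_up("/tmp/ray_results/exp/", ("10.0.0.2", "/tmp/ray_results/exp/"))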
|
objectModel/Python/tests/cdm/projection/test_projection_object_model.py | jocubeit/CDM | 265 | 12759635 | <filename>objectModel/Python/tests/cdm/projection/test_projection_object_model.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.enums import CdmObjectType
from cdm.enums.cdm_operation_type import CdmOperationType
from cdm.objectmodel import CdmCorpusDefinition, CdmFolderDefinition, CdmProjection, CdmOperationAddCountAttribute, \
CdmOperationAddSupportingAttribute, CdmOperationAddTypeAttribute, CdmOperationExcludeAttributes, CdmOperationArrayExpansion, \
CdmOperationCombineAttributes, CdmOperationRenameAttributes, CdmOperationReplaceAsForeignKey, CdmOperationIncludeAttributes, CdmObject
from cdm.storage import LocalAdapter
from tests.common import async_test, TestHelper
from tests.utilities.projection_test_utils import ProjectionTestUtils
class ProjectionObjectModelTest(unittest.TestCase):
foundation_json_path = 'cdm:/foundations.cdm.json'
# The path between TestDataPath and TestName.
tests_subpath = os.path.join('Cdm', 'Projection')
@async_test
async def test_projection_using_object_model(self):
"""Basic test to save projection based entities and then try to reload them and validate that the projections were persisted correctly"""
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, 'test_projection_using_object_model')
corpus.storage.mount('local', LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath, 'test_projection_using_object_model')))
local_root = corpus.storage.fetch_root_folder('local')
manifest_default = self._create_default_manifest(corpus, local_root)
entity_test_source = self._create_entity_test_source(corpus, manifest_default, local_root)
entity_test_entity_projection = self._create_entity_test_entity_projection(corpus, manifest_default, local_root)
entity_test_entity_nested_projection = self._create_entity_test_entity_nested_projection(corpus, manifest_default, local_root)
entity_test_entity_attribute_projection = self._create_entity_test_entity_attribute_projection(corpus, manifest_default, local_root)
entity_test_operation_collection = self._create_entity_test_operation_collection(corpus, manifest_default, local_root)
# Save manifest and entities
await manifest_default.save_as_async('{}.manifest.cdm.json'.format(manifest_default.manifest_name), True)
expected = 'TestSource'
expected_type = CdmObjectType.PROJECTION_DEF
actual = None
actual_type = CdmObjectType.ERROR
# Try to read back the newly persisted manifest and projection based entities
manifest_read_back = await corpus.fetch_object_async('local:/{}.manifest.cdm.json'.format(manifest_default.manifest_name))
self.assertEqual(5, len(manifest_read_back.entities))
self.assertEqual(entity_test_source.entity_name, manifest_read_back.entities[0].entity_name)
self.assertEqual(entity_test_entity_projection.entity_name, manifest_read_back.entities[1].entity_name)
self.assertEqual(entity_test_entity_nested_projection.entity_name, manifest_read_back.entities[2].entity_name)
self.assertEqual(entity_test_entity_attribute_projection.entity_name, manifest_read_back.entities[3].entity_name)
# Read back the newly persisted manifest and projection based entity TestEntityProjection and validate
entity_test_entity_projection_read_back = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_entity_projection.entity_name, entity_test_entity_projection.entity_name), manifest_read_back)
self.assertIsNotNone(entity_test_entity_projection_read_back)
actual = entity_test_entity_projection_read_back.extends_entity.explicit_reference.source.named_reference
actual_type = entity_test_entity_projection_read_back.extends_entity.explicit_reference.object_type
self.assertEqual(expected, actual)
self.assertEqual(expected_type, actual_type)
# Read back the newly persisted manifest and projection based entity TestEntityNestedProjection and validate
entity_test_entity_nested_projection_read_back = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_entity_nested_projection.entity_name, entity_test_entity_nested_projection.entity_name), manifest_read_back)
self.assertIsNotNone(entity_test_entity_nested_projection_read_back)
actual = entity_test_entity_nested_projection_read_back.extends_entity.explicit_reference.source.explicit_reference.source.explicit_reference.source.named_reference
actual_type = entity_test_entity_nested_projection_read_back.extends_entity.explicit_reference.source.explicit_reference.source.explicit_reference.object_type
self.assertEqual(expected, actual)
self.assertEqual(expected_type, actual_type)
# Read back the newly persisted manifest and projection based entity TestEntityAttributeProjection and validate
entity_test_entity_attribute_projection_read_back = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_entity_attribute_projection.entity_name, entity_test_entity_attribute_projection.entity_name), manifest_read_back)
self.assertIsNotNone(entity_test_entity_attribute_projection_read_back)
actual = entity_test_entity_attribute_projection_read_back.attributes[0].entity.explicit_reference.source.named_reference
actual_type = entity_test_entity_attribute_projection_read_back.attributes[0].entity.explicit_reference.object_type
self.assertEqual(expected, actual)
self.assertEqual(expected_type, actual_type)
# Read back operations collections and validate
entity_test_operation_collection_read_back = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_operation_collection.entity_name, entity_test_operation_collection.entity_name), manifest_read_back)
self.assertIsNotNone(entity_test_operation_collection_read_back)
actual_operation_count = len(entity_test_operation_collection_read_back.extends_entity.explicit_reference.operations)
self.assertEqual(9, actual_operation_count)
operations = entity_test_operation_collection_read_back.extends_entity.explicit_reference.operations
self.assertEqual(CdmOperationType.ADD_COUNT_ATTRIBUTE, operations[0].type)
self.assertEqual(CdmOperationType.ADD_SUPPORTING_ATTRIBUTE, operations[1].type)
self.assertEqual(CdmOperationType.ADD_TYPE_ATTRIBUTE, operations[2].type)
self.assertEqual(CdmOperationType.EXCLUDE_ATTRIBUTES, operations[3].type)
self.assertEqual(CdmOperationType.ARRAY_EXPANSION, operations[4].type)
self.assertEqual(CdmOperationType.COMBINE_ATTRIBUTES, operations[5].type)
self.assertEqual(CdmOperationType.RENAME_ATTRIBUTES, operations[6].type)
self.assertEqual(CdmOperationType.REPLACE_AS_FOREIGN_KEY, operations[7].type)
self.assertEqual(CdmOperationType.INCLUDE_ATTRIBUTES, operations[8].type)
def _create_default_manifest(self, corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmManifestDefinition':
"""Create a default manifest"""
manifest_name = 'default'
manifest_doc_name = '{}.manifest.cdm.json'.format(manifest_name)
manifest_default = corpus.make_object(CdmObjectType.MANIFEST_DEF, manifest_name)
local_root.documents.append(manifest_default, manifest_doc_name)
return manifest_default
def _create_entity_test_source(self, corpus: 'CdmCorpusDefinition', manifest_default: 'CdmManifestDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
"""Create a simple entity called 'TestSource' with a single attribute"""
entity_name = 'TestSource'
entity_test_source = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
attribute_name = 'TestAttribute'
entity_test_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, attribute_name, False)
entity_test_attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'string', True)
entity_test_attribute.purpose = corpus.make_ref(CdmObjectType.PURPOSE_REF, 'hasA', True)
entity_test_attribute.display_name = attribute_name
entity_test_source.attributes.append(entity_test_attribute)
entity_test_source_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_test_source_doc.imports.append(self.foundation_json_path)
entity_test_source_doc.definitions.append(entity_test_source)
local_root.documents.append(entity_test_source_doc, entity_test_source_doc.name)
manifest_default.entities.append(entity_test_source)
return entity_test_source
def _create_projection(self, corpus: 'CdmCorpusDefinition') -> 'CdmProjection':
"""Create a simple projection object"""
projection = corpus.make_object(CdmObjectType.PROJECTION_DEF)
projection.source = corpus.make_object(CdmObjectType.ENTITY_REF, 'TestSource', True)
return projection
def _create_nested_projection(self, corpus: 'CdmCorpusDefinition') -> 'CdmProjection':
"""Create a 3-level nested projection object"""
projection3 = corpus.make_object(CdmObjectType.PROJECTION_DEF)
projection3.source = corpus.make_object(CdmObjectType.ENTITY_REF, 'TestSource', True)
inline_projection_entity_ref3 = corpus.make_object(CdmObjectType.ENTITY_REF, None)
inline_projection_entity_ref3.explicit_reference = projection3
projection2 = corpus.make_object(CdmObjectType.PROJECTION_DEF)
projection2.source = inline_projection_entity_ref3
inline_projection_entity_ref2 = corpus.make_object(CdmObjectType.ENTITY_REF, None)
inline_projection_entity_ref2.explicit_reference = projection2
projection1 = corpus.make_object(CdmObjectType.PROJECTION_DEF)
projection1.source = inline_projection_entity_ref2
return projection1
def _create_entity_test_entity_projection(self, corpus: 'CdmCorpusDefinition', manifest_default: 'CdmManifestDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
"""Create an entity 'TestEntityProjection' that extends from a projection"""
entity_name = 'TestEntityProjection'
inline_projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
inline_projection_entity_ref.explicit_reference = self._create_projection(corpus)
entity_test_entity_projection = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
entity_test_entity_projection.extends_entity = inline_projection_entity_ref
entity_test_entity_projection_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_test_entity_projection_doc.imports.append(self.foundation_json_path)
entity_test_entity_projection_doc.imports.append('TestSource.cdm.json')
entity_test_entity_projection_doc.definitions.append(entity_test_entity_projection)
local_root.documents.append(entity_test_entity_projection_doc, entity_test_entity_projection_doc.name)
manifest_default.entities.append(entity_test_entity_projection)
return entity_test_entity_projection
def _create_entity_test_entity_nested_projection(self, corpus: 'CdmCorpusDefinition', manifest_default: 'CdmManifestDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
"""Create an entity 'TestEntityNestedProjection' that extends from a projection"""
entity_name = 'TestEntityNestedProjection'
inline_projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
inline_projection_entity_ref.explicit_reference = self._create_nested_projection(corpus)
entity_test_entity_nested_projection = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
entity_test_entity_nested_projection.extends_entity = inline_projection_entity_ref
entity_test_entity_nested_projection_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_test_entity_nested_projection_doc.imports.append(self.foundation_json_path)
entity_test_entity_nested_projection_doc.imports.append('TestSource.cdm.json')
entity_test_entity_nested_projection_doc.definitions.append(entity_test_entity_nested_projection)
local_root.documents.append(entity_test_entity_nested_projection_doc, entity_test_entity_nested_projection_doc.name)
manifest_default.entities.append(entity_test_entity_nested_projection)
return entity_test_entity_nested_projection
def _create_entity_test_entity_attribute_projection(self, corpus: 'CdmCorpusDefinition', manifest_default: 'CdmManifestDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
"""Create an entity 'TestEntityAttributeProjection' that contains an entity attribute with a projection as a source entity"""
entity_name = 'TestEntityAttributeProjection'
inline_projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
inline_projection_entity_ref.explicit_reference = self._create_projection(corpus)
entity_test_entity_attribute_projection = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
attribute_name = 'TestAttribute'
entity_test_entity_attribute = corpus.make_object(CdmObjectType.ENTITY_ATTRIBUTE_DEF, attribute_name, False)
entity_test_entity_attribute.entity = inline_projection_entity_ref
entity_test_entity_attribute_projection.attributes.append(entity_test_entity_attribute)
entity_test_entity_attribute_projection_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_test_entity_attribute_projection_doc.imports.append(self.foundation_json_path)
entity_test_entity_attribute_projection_doc.imports.append('TestSource.cdm.json')
entity_test_entity_attribute_projection_doc.definitions.append(entity_test_entity_attribute_projection)
local_root.documents.append(entity_test_entity_attribute_projection_doc, entity_test_entity_attribute_projection_doc.name)
manifest_default.entities.append(entity_test_entity_attribute_projection)
return entity_test_entity_attribute_projection
def _create_projection_with_operation_collection(self, corpus: 'CdmCorpusDefinition', owner: 'CdmObject') -> 'CdmProjection':
"""Create a projection object with operations"""
projection = corpus.make_object(CdmObjectType.PROJECTION_DEF)
projection.source = corpus.make_object(CdmObjectType.ENTITY_REF, 'TestSource', True)
# AddCountAttribute Operation
add_count_attribute_op = CdmOperationAddCountAttribute(corpus.ctx)
add_count_attribute_op.count_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'countAtt')
projection.operations.append(add_count_attribute_op)
# AddSupportingAttribute Operation
add_supporting_attribute_op = CdmOperationAddSupportingAttribute(corpus.ctx)
add_supporting_attribute_op.supporting_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'supportingAtt')
projection.operations.append(add_supporting_attribute_op)
# AddTypeAttribute Operation
add_type_attribute_op = CdmOperationAddTypeAttribute(corpus.ctx)
add_type_attribute_op.type_attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'typeAtt')
projection.operations.append(add_type_attribute_op)
# ExcludeAttributes Operation
exclude_attributes_op = CdmOperationExcludeAttributes(corpus.ctx)
exclude_attributes_op.exclude_attributes = []
exclude_attributes_op.exclude_attributes.append('testAttribute1')
projection.operations.append(exclude_attributes_op)
# ArrayExpansion Operation
array_expansion_op = CdmOperationArrayExpansion(corpus.ctx)
array_expansion_op.start_ordinal = 0
array_expansion_op.end_ordinal = 1
projection.operations.append(array_expansion_op)
# CombineAttributes Operation
combine_attributes_op = CdmOperationCombineAttributes(corpus.ctx)
combine_attributes_op.select = []
combine_attributes_op.merge_into = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'combineAtt')
combine_attributes_op.select.append('testAttribute1')
projection.operations.append(combine_attributes_op)
# RenameAttributes Operation
rename_attributes_op = CdmOperationRenameAttributes(corpus.ctx)
rename_attributes_op.rename_format = '{m}'
projection.operations.append(rename_attributes_op)
# ReplaceAsForeignKey Operation
replace_as_foreign_key_op = CdmOperationReplaceAsForeignKey(corpus.ctx)
replace_as_foreign_key_op.reference = 'testAttribute1'
replace_as_foreign_key_op.replace_with = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, 'testForeignKey', False)
projection.operations.append(replace_as_foreign_key_op)
# IncludeAttributes Operation
include_attributes_op = CdmOperationIncludeAttributes(corpus.ctx)
include_attributes_op.include_attributes = []
include_attributes_op.include_attributes.append('testAttribute1')
projection.operations.append(include_attributes_op)
return projection
def _create_entity_test_operation_collection(self, corpus: 'CdmCorpusDefinition', manifest_default: 'CdmManifestDefinition', local_root: 'CdmFolderDefinition'):
"""Create an entity 'TestOperationCollection' that extends from a projection with a collection of operations"""
entity_name = 'TestOperationCollection'
inline_projection_entity_ref = corpus.make_object(CdmObjectType.ENTITY_REF, None)
entity_test_operation_collection = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
inline_projection_entity_ref.explicit_reference = self._create_projection_with_operation_collection(corpus, entity_test_operation_collection)
entity_test_operation_collection.extends_entity = inline_projection_entity_ref
entity_test_operation_collection_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_test_operation_collection_doc.imports.append(self.foundation_json_path)
entity_test_operation_collection_doc.imports.append('TestSource.cdm.json')
entity_test_operation_collection_doc.definitions.append(entity_test_operation_collection)
local_root.documents.append(entity_test_operation_collection_doc, entity_test_operation_collection_doc.name)
manifest_default.entities.append(entity_test_operation_collection)
return entity_test_operation_collection
|
kafka_utils/kafka_rolling_restart/task.py | dbgrigsby/kafka-utils | 302 | 12759637 |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import shlex
class TaskFailedException(Exception):
pass
class Task(object):
"""Base class for implementing Task
All the args passed can be accessed via self.args
:param args: The program arguments
"""
def __init__(self, args):
if args:
self.args = self.parse_args(list(
shlex.split(args)
))
else:
self.args = self.parse_args([])
def parse_args(self, args):
"""Parse args command line arguments.
:param args: The list of arguments as strings.
"""
pass
def run(self, host):
"""This contains the main logic of the task
Please note an exception from this method will completely stop the restart
        :param host: the host on which the precheck is executed
:type host: string
"""
raise NotImplementedError("Implemented in subclass")
class PreStopTask(Task):
"""Class to be used for any pre stop checks"""
class PostStopTask(Task):
"""Class to be used for any post stop checks"""
|
tests/unit/core/metrics/test_regression_metrics.py | cswarth/whylogs | 603 | 12759681 | import os
import pandas as pd
import pytest
from whylogs.core.metrics.regression_metrics import RegressionMetrics
from whylogs.proto import RegressionMetricsMessage
TEST_DATA_PATH = os.path.abspath(
os.path.join(
os.path.realpath(os.path.dirname(__file__)),
os.pardir,
os.pardir,
os.pardir,
os.pardir,
"testdata",
)
)
def test_initial_state():
regmet = RegressionMetrics()
assert regmet.count == 0
assert regmet.sum_diff == 0.0
assert regmet.sum2_diff == 0.0
assert regmet.sum_abs_diff == 0.0
assert regmet.mean_squared_error() is None
assert regmet.mean_absolute_error() is None
assert regmet.root_mean_squared_error() is None
def test_load_parquet():
mean_absolute_error = 85.94534216005789
mean_squared_error = 11474.89611670205
root_mean_squared_error = 107.12094154133472
regmet = RegressionMetrics()
    df = pd.read_parquet(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-12.parquet"))
regmet.add(df["predictions"].to_list(), df["targets"].to_list())
assert regmet.count == len(df["predictions"].to_list())
assert regmet.mean_squared_error() == pytest.approx(mean_squared_error, 0.01)
assert regmet.mean_absolute_error() == pytest.approx(mean_absolute_error, 0.01)
assert regmet.root_mean_squared_error() == pytest.approx(root_mean_squared_error, 0.01)
msg = regmet.to_protobuf()
new_regmet = RegressionMetrics.from_protobuf(msg)
assert regmet.count == new_regmet.count
assert regmet.mean_squared_error() == new_regmet.mean_squared_error()
assert regmet.root_mean_squared_error() == new_regmet.root_mean_squared_error()
assert regmet.mean_absolute_error() == new_regmet.mean_absolute_error()
def test_empty_protobuf_should_return_none():
empty_message = RegressionMetricsMessage()
assert RegressionMetrics.from_protobuf(empty_message) is None
def test_merging():
regmet_sum = RegressionMetrics()
regmet = RegressionMetrics(prediction_field="predictions", target_field="targets")
    df = pd.read_parquet(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-12.parquet"))
regmet.add(df["predictions"].to_list(), df["targets"].to_list())
regmet_sum.add(df["predictions"].to_list(), df["targets"].to_list())
regmet_2 = RegressionMetrics(prediction_field="predictions", target_field="targets")
    df_2 = pd.read_parquet(os.path.join(TEST_DATA_PATH, "metrics", "2021-02-13.parquet"))
regmet_2.add(df_2["predictions"].to_list(), df_2["targets"].to_list())
regmet_sum.add(df_2["predictions"].to_list(), df_2["targets"].to_list())
merged_reg_metr = regmet.merge(regmet_2)
assert merged_reg_metr.count == regmet_sum.count
assert merged_reg_metr.mean_squared_error() == pytest.approx(regmet_sum.mean_squared_error(), 0.001)
assert merged_reg_metr.root_mean_squared_error() == pytest.approx(regmet_sum.root_mean_squared_error(), 0.001)
assert merged_reg_metr.mean_absolute_error() == pytest.approx(regmet_sum.mean_absolute_error(), 0.001)
|
html_parsing/get_population_from_wikidata.py | DazEB2/SimplePyScripts | 117 | 12759719 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
from bs4 import BeautifulSoup
def get_populations(url: str) -> dict:
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
    # P1082 is the Wikidata property identifier for population
population_node = root.select_one('#P1082')
populations = dict()
    # Iterate over the rows in the column next to the population property
for row in population_node.select('.wikibase-statementview'):
        # Small trick: take only the first two values; they should be the population count and the date
number_str, data_str = row.select('.wikibase-snakview-value')[:2]
        # Extract the text from the nodes
number_str = number_str.text.strip()
data_str = data_str.text.strip()
        # Split the string and take the last part, then convert it to a number
        # "1 July 2012" -> 2012, "2010" -> 2010
year = int(data_str.split()[-1])
        # Add to the dictionary
populations[year] = number_str
return populations
def get_population_by_year(populations: dict, year: int) -> str:
    # If the year is not found, return -1
return populations.get(year, -1)
# Same as get_population_by_year, but first fetches the data from the
# given url and then looks up the value for year
def get_population_from_url_by_year(url: str, year: int) -> str:
populations = get_populations(url)
return get_population_by_year(populations, year)
if __name__ == '__main__':
url = 'https://www.wikidata.org/wiki/Q148'
populations = get_populations(url)
print(populations) # {2012: '1,375,198,619', 2010: '1,359,755,102', 2015: '1,397,028,553', ...
    # Print the data sorted by key, in ascending order
for year in sorted(populations):
print("{}: {}".format(year, populations[year]))
# 2010: 1,359,755,102
# 2011: 1,367,480,264
# 2012: 1,375,198,619
# 2013: 1,382,793,212
# 2014: 1,390,110,388
# 2015: 1,397,028,553
# 2016: 1,403,500,365
# 2017: 1,409,517,397
print(get_population_by_year(populations, 2012)) # 1,375,198,619
print(get_population_by_year(populations, 2013)) # 1,382,793,212
print(get_population_by_year(populations, 2014)) # 1,390,110,388
|
Algorithms/Searching & Sorting/Fibonacii Search/fibinacci_search.py | strangestroad/interview-techdev-guide | 320 | 12759748 |
def FibonacciSearch(arr, key):
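    # Classic Fibonacci search over a sorted list: fib2 and fib1 are consecutive
    # Fibonacci numbers with fib = fib1 + fib2; each comparison either moves the
    # probe index forward past a Fibonacci-sized block (arr[i] < key) or shrinks
    # the window (arr[i] > key), so the search takes O(log n) comparisons.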
fib2 = 0
fib1 = 1
fib = fib1 + fib2
while (fib < len(arr)):
fib2 = fib1
fib1 = fib
fib = fib1 + fib2
index = -1
while (fib > 1):
i = min(index + fib2, (len(arr)-1))
if (arr[i] < key):
fib = fib1
fib1 = fib2
fib2 = fib - fib1
index = i
elif (arr[i] > key):
fib = fib2
fib1 = fib1 - fib2
fib2 = fib - fib1
else :
return i
if(fib1 and index < (len(arr)-1) and arr[index+1] == key):
return index+1
return -1
key= 15
arr = [5, 10, 15, 20, 25, 30, 35]
ans = FibonacciSearch(arr, key)
print(ans)
if (ans != -1):
print("Found at "+ str(ans+1) +" position")
else:
print("Not Found")
|
materials/make_induced_graph.py | lavig17/Knowledge-Graph-Image | 189 | 12759759 |
import argparse
import json
from nltk.corpus import wordnet as wn
import torch
from glove import GloVe
def getnode(x):
return wn.synset_from_pos_and_offset('n', int(x[1:]))
def getwnid(u):
s = str(u.offset())
return 'n' + (8 - len(s)) * '0' + s
def getedges(s):
dic = {x: i for i, x in enumerate(s)}
edges = []
for i, u in enumerate(s):
for v in u.hypernyms():
j = dic.get(v)
if j is not None:
edges.append((i, j))
return edges
def induce_parents(s, stop_set):
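    # Breadth-first walk up the WordNet hypernym hierarchy: every newly reached
    # ancestor of the synsets in s is appended to s in place, while nodes already
    # in stop_set (here, the ImageNet XML hierarchy) are kept but not expanded
    # any further.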
q = s
vis = set(s)
l = 0
while l < len(q):
u = q[l]
l += 1
if u in stop_set:
continue
for p in u.hypernyms():
if p not in vis:
vis.add(p)
q.append(p)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', default='imagenet-split.json')
parser.add_argument('--output', default='imagenet-induced-graph.json')
args = parser.parse_args()
print('making graph ...')
xml_wnids = json.load(open('imagenet-xml-wnids.json', 'r'))
xml_nodes = list(map(getnode, xml_wnids))
xml_set = set(xml_nodes)
js = json.load(open(args.input, 'r'))
train_wnids = js['train']
test_wnids = js['test']
key_wnids = train_wnids + test_wnids
s = list(map(getnode, key_wnids))
induce_parents(s, xml_set)
s_set = set(s)
for u in xml_nodes:
if u not in s_set:
s.append(u)
wnids = list(map(getwnid, s))
edges = getedges(s)
print('making glove embedding ...')
glove = GloVe('glove.6B.300d.txt')
vectors = []
for wnid in wnids:
vectors.append(glove[getnode(wnid).lemma_names()])
vectors = torch.stack(vectors)
print('dumping ...')
obj = {}
obj['wnids'] = wnids
obj['vectors'] = vectors.tolist()
obj['edges'] = edges
json.dump(obj, open(args.output, 'w'))
|
applications/tensorflow/dynamic_sparsity/ipu_sparse_ops/fp_slot_opt.py | payoto/graphcore_examples | 260 | 12759771 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""
This module exposes an Optimizer wrapper that lets regular tf.train.Optimizers
select the floating-point precision of their slot variables independently of the
variable type. Currently only Adam is supported.
"""
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.training.optimizer import _var_key
from tensorflow.python.training import slot_creator
from tensorflow.python.training.adam import AdamOptimizer
from typing import Type
from logging import getLogger
tf.disable_v2_behavior()
tf.disable_eager_execution()
logger = getLogger(os.path.basename(__file__))
def SelectableSlotFPFormatOptimizer(cls: Type[tf.train.Optimizer]) -> Type[tf.train.Optimizer]:
if not issubclass(cls, AdamOptimizer):
raise ValueError(f'Class {cls} does not inherit from tf.python.training.adam.AdamOptimizer')
class Wrapped(cls):
def __init__(self, slots_dtype, force_fp32_weight_update=True, use_nesterov=False, *args, **kwargs):
self.slots_dtype = tf.as_dtype(slots_dtype)
self.use_nesterov = use_nesterov
self.force_fp32_weight_update = force_fp32_weight_update
super(Wrapped, self).__init__(*args, **kwargs)
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
This is effectively a copy of the original TF optimizer method
            except that this one allows passing a dtype to `create_zeros_slot`.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name,
dtype=self.slots_dtype)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return tf.cast(named_slots[_var_key(var)], var.dtype)
def _apply_weight_update(self, grad, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, use_nesterov):
if self.force_fp32_weight_update:
# Cast to fp32 for extra precision
weight_update_dtype = tf.float32
else:
weight_update_dtype = var.dtype
# cast all variables to the same desired dtype for the update
m_c = tf.convert_to_tensor(tf.cast(m, weight_update_dtype))
v_c = tf.convert_to_tensor(tf.cast(v, weight_update_dtype))
var_c = tf.cast(var, weight_update_dtype)
lr_c = tf.cast(lr, weight_update_dtype)
beta1_power_c = tf.cast(beta1_power, weight_update_dtype)
beta2_power_c = tf.cast(beta2_power, weight_update_dtype)
beta1_c = tf.cast(beta1, weight_update_dtype)
beta2_c = tf.cast(beta2, weight_update_dtype)
epsilon_c = tf.cast(epsilon, weight_update_dtype)
grad_c = tf.cast(grad, weight_update_dtype)
# correct for the bias of the first and second order moments
alpha = lr_c * math_ops.sqrt(1 - beta2_power_c) / (1 - beta1_power_c)
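            # alpha folds the bias corrections of both moments into the learning rate,
            # lr * sqrt(1 - beta2^t) / (1 - beta1^t), i.e. the "epsilon-hat" Adam
            # formulation used by tf.train.AdamOptimizer, where epsilon is added to
            # sqrt(v_t) directly rather than to the bias-corrected sqrt(v_hat).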
# update the first order moment
m_t = beta1_c * m_c + (1.0 - beta1_c) * grad_c
# update the second order moment
v_t = beta2_c * v_c + (1.0 - beta2_c) * grad_c * grad_c
# store the moments in the right dtype
assign_m = tf.assign(m, tf.cast(m_t, self.slots_dtype))
assign_v = tf.assign(v, tf.cast(v_t, self.slots_dtype))
# update the variable
with tf.control_dependencies([assign_m, assign_v]):
if use_nesterov:
return tf.cast(var_c - ((grad_c * (1.0 - beta1_c) + beta1_c * m_t) * alpha) / (math_ops.sqrt(v_t) + epsilon_c), var.dtype)
else:
return tf.cast(var_c - (m_t * alpha) / (math_ops.sqrt(v_t) + epsilon_c), var.dtype)
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return var.assign(
self._apply_weight_update(
grad=grad,
var=var,
m=m,
v=v,
beta1_power=beta1_power,
beta2_power=beta2_power,
lr=self._lr_t,
beta1=self._beta1_t,
beta2=self._beta2_t,
epsilon=self._epsilon_t,
use_nesterov=self.use_nesterov))
return Wrapped
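# Usage sketch (illustrative only, not part of this module): wrap the stock Adam
# optimizer so that its m/v slot variables are stored in fp16 while the weight
# update itself is still computed in fp32. The learning rate and the `loss`
# tensor below are example assumptions, not values taken from this codebase.
#
#   SelectableAdam = SelectableSlotFPFormatOptimizer(AdamOptimizer)
#   optimizer = SelectableAdam(slots_dtype=tf.float16,
#                              force_fp32_weight_update=True,
#                              learning_rate=1e-3)
#   train_op = optimizer.minimize(loss)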
|
NAS/single-path-one-shot/src/MNIST/utils.py | naviocean/SimpleCVReproduction | 923 | 12759773 |
import os
import re
import torch
import torch.nn as nn
import random
import json
import numpy as np
def get_num_correct(preds, labels):
return preds.argmax(dim=1).eq(labels).sum().item()
class ArchLoader():
'''
    Load candidate architectures from a json file.
'''
def __init__(self, path):
super(ArchLoader, self).__init__()
self.arc_list = []
self.arc_dict = {}
self.get_arch_list_dict(path)
random.shuffle(self.arc_list)
self.idx = -1
self.level_config = {
"level1": [4, 8, 12, 16],
"level2": [4, 8, 12, 16, 20, 24, 28, 32],
"level3": [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64]
}
def get_arch_list(self):
return self.arc_list
def get_arch_dict(self):
return self.arc_dict
def get_random_batch(self, bs):
return random.sample(self.arc_list, bs)
def get_part_dict(self):
keys = list(self.arc_dict.keys())[:10]
return dict([(key, self.arc_dict[key]) for key in keys])
def convert_list_arc_str(self, arc_list):
arc_str = ""
arc_list = [str(item)+"-" for item in arc_list]
for item in arc_list:
arc_str += item
return arc_str[:-1]
def __next__(self):
self.idx += 1
if self.idx >= len(self.arc_list):
raise StopIteration
return self.arc_list[self.idx]
def __iter__(self):
return self
def get_arch_list_dict(self, path):
with open(path, "r") as f:
self.arc_dict = json.load(f)
self.arc_list = []
for _, v in self.arc_dict.items():
self.arc_list.append(v["arch"])
def generate_fair_batch(self):
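        # Builds one "fair" batch of 16 candidate architectures: each row of the
        # returned (16, 20) array is a full 20-layer choice vector, and within each
        # column every candidate option appears equally often (4x for the 4-option
        # level1 layers, 2x for the 8-option level2 layers, 1x for level3).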
rngs = []
seed = 0
# level1
for i in range(0, 7):
seed += 1
random.seed(seed)
rngs.append(random.sample(self.level_config['level1'],
len(self.level_config['level1']))*4)
# level2
for i in range(7, 13):
seed += 1
random.seed(seed)
rngs.append(random.sample(self.level_config['level2'],
len(self.level_config['level2']))*2)
# level3
for i in range(13, 20):
seed += 1
random.seed(seed)
rngs.append(random.sample(self.level_config['level3'],
len(self.level_config['level3'])))
return np.transpose(rngs)
def generate_niu_fair_batch(self):
rngs = []
seed = 0
# level1
for i in range(0, 7):
seed += 1
random.seed(seed)
tmp_rngs = []
for _ in range(4):
tmp_rngs.extend(random.sample(self.level_config['level1'],
len(self.level_config['level1'])))
rngs.append(tmp_rngs)
# level2
for i in range(7, 13):
seed += 1
random.seed(seed)
tmp_rngs = []
for _ in range(2):
tmp_rngs.extend(random.sample(self.level_config['level2'],
len(self.level_config['level2'])))
rngs.append(tmp_rngs)
# level3
for i in range(13, 20):
seed += 1
random.seed(seed)
rngs.append(random.sample(self.level_config['level3'],
len(self.level_config['level3'])))
return np.transpose(rngs)
# arch_loader = ArchLoader("Track1_final_archs.json")
# print(arch_loader.generate_niu_fair_batch())
# arc_dc = arch_loader.get_random_batch(1000)
# for i, arc in enumerate(arc_dc):
# print(i, arc)
# cnt = 0
# for i,ac in enumerate(arch_loader):
# print(i,ac)
# cnt += 1
# print(cnt)
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(
1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * \
targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
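# Example: with epsilon=0.1 and num_classes=10, a one-hot target becomes 0.91 for
# the true class and 0.01 for every other class before the smoothed cross-entropy
# is averaged over the batch.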
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
self.val = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def save_checkpoint(state, iters, tag=''):
if not os.path.exists("./models"):
os.makedirs("./models")
filename = os.path.join(
"./models/{}checkpoint-{:06}.pth.tar".format(tag, iters))
torch.save(state, filename)
# latestfilename = os.path.join(
# "./models/{}checkpoint-latest.pth.tar".format(tag))
# torch.save(state, latestfilename)
def get_lastest_model():
if not os.path.exists('./models'):
os.mkdir('./models')
model_list = os.listdir('./models/')
if model_list == []:
return None, 0
model_list.sort()
lastest_model = model_list[-1]
iters = re.findall(r'\d+', lastest_model)
return './models/' + lastest_model, int(iters[0])
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters():
if pname.find('weight') >= 0 and len(p.size()) > 1:
# print('include ', pname, p.size())
group_weight_decay.append(p)
else:
# print('not include ', pname, p.size())
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(
group_weight_decay) + len(group_no_weight_decay)
groups = [dict(params=group_weight_decay), dict(
params=group_no_weight_decay, weight_decay=0.)]
return groups
def bn_calibration_init(m):
""" calculating post-statistics of batch normalization """
if getattr(m, 'track_running_stats', False):
# reset all values for post-statistics
m.reset_running_stats()
# set bn in training mode to update post-statistics
m.training = True
# if use cumulative moving average
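        # NOTE: `FLAGS` is assumed to be a global experiment-config object defined
        # by the importing script; it is not defined or imported in this module.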
if getattr(FLAGS, 'cumulative_bn_stats', False):
m.momentum = None
|
internetdefense/settings/dev.py | gnubrasil/idl-members | 175 | 12759788 | """
Settings specific to development environments
"""
from os import path
from settings.base import PROJECT_DIR, MIDDLEWARE_CLASSES, INSTALLED_APPS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(PROJECT_DIR, 'data', 'data.db'),
}
}
DEBUG = True
TEMPLATE_DEBUG = True
SITE_ID = 1
INCLUDE_DOMAIN = 'localhost:8000'
INCLUDE_URL = INCLUDE_DOMAIN + '/include/'
STATIC_URL = '/static/'
def show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': show_toolbar,
}
INTERNAL_IPS = ('127.0.0.1', '10.0.1.3',)
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INSTALLED_APPS = INSTALLED_APPS + [
'debug_toolbar',
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': path.join(PROJECT_DIR, 'cache'),
'TIMEOUT': 60 * 60 * 24 * 365
}
}
COMPRESS_ENABLED = True
|
openvim/dhcp_thread.py | acasana/openmano_movilnet | 204 | 12759794 | # -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: <EMAIL>
##
'''
This is a thread that interacts with the dhcp server to get the IP addresses
'''
__author__="<NAME>, <NAME>"
__date__ ="$4-Jan-2016 12:07:15$"
import threading
import time
import Queue
import paramiko
import random
import subprocess
#TODO: insert a logging system
class dhcp_thread(threading.Thread):
def __init__(self, dhcp_params, db, db_lock, test, dhcp_nets, debug=None):
'''Init a thread.
Arguments: thread_info must be a dictionary with:
'dhcp_params' dhcp server parameters with the following keys:
                mandatory: user, host, port, key, ifaces (list of the interface names managed by the dhcp server)
optional: password, key, port(22)
'db' 'db_lock': database class and lock for accessing it
            'test': in test mode no access to a server is done, and the IP is invented
'''
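        # Example dhcp_params (illustrative values only):
        #   {"host": "10.0.0.1", "port": 22, "user": "dhcpadmin", "password": None,
        #    "key": None, "ifaces": ["eth1"]}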
threading.Thread.__init__(self)
self.name = "dhcp_thread"
self.dhcp_params = dhcp_params
self.debug = debug
self.db = db
self.db_lock = db_lock
self.test = test
self.dhcp_nets = dhcp_nets
self.ssh_conn = None
self.mac_status ={} #dictionary of mac_address to retrieve information
#ip: None
#retries:
            #next_reading: time of the next attempt to check the ACTIVE status or the IP
            #created: time when it was added
            #active: time when the VM entered ACTIVE status
self.queueLock = threading.Lock()
self.taskQueue = Queue.Queue(2000)
def ssh_connect(self):
try:
#Connect SSH
self.ssh_conn = paramiko.SSHClient()
self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_conn.load_system_host_keys()
self.ssh_conn.connect(self.dhcp_params["host"], port=self.dhcp_params.get("port",22),
username=self.dhcp_params["user"], password=self.dhcp_params.get("password"), pkey=self.dhcp_params.get("key"),
timeout=2)
except paramiko.ssh_exception.SSHException as e:
text = e.args[0]
print self.name, ": ssh_connect ssh Exception:", text
def load_mac_from_db(self):
#TODO get macs to follow from the database
print self.name, " load macs from db"
self.db_lock.acquire()
r,c = self.db.get_table(SELECT=('mac','ip_address','nets.uuid as net_id', ),
FROM='ports join nets on ports.net_id=nets.uuid',
WHERE_NOT={'ports.instance_id': None, 'nets.provider': None})
self.db_lock.release()
now = time.time()
self.mac_status ={}
if r<0:
print self.name, ": Error getting data from database:", c
return
for port in c:
if port["net_id"] in self.dhcp_nets:
self.mac_status[ port["mac"] ] = {"ip": port["ip_address"], "next_reading": now, "created": now, "retries":0}
def insert_task(self, task, *aditional):
try:
self.queueLock.acquire()
task = self.taskQueue.put( (task,) + aditional, timeout=5)
self.queueLock.release()
return 1, None
except Queue.Full:
return -1, "timeout inserting a task over host " + self.name
def run(self):
print self.name, " starting, nets", self.dhcp_nets
next_iteration = time.time() + 10
while True:
self.load_mac_from_db()
while True:
self.queueLock.acquire()
if not self.taskQueue.empty():
task = self.taskQueue.get()
else:
task = None
self.queueLock.release()
if task is None:
now=time.time()
if now >= next_iteration:
next_iteration = self.get_ip_from_dhcp()
else:
time.sleep(1)
continue
if task[0] == 'add':
print self.name, ": processing task add mac", task[1]
now=time.time()
self.mac_status[task[1] ] = {"ip": None, "next_reading": now, "created": now, "retries":0}
next_iteration = now
elif task[0] == 'del':
print self.name, ": processing task del mac", task[1]
if task[1] in self.mac_status:
del self.mac_status[task[1] ]
elif task[0] == 'exit':
print self.name, ": processing task exit"
self.terminate()
return 0
else:
print self.name, ": unknown task", task
def terminate(self):
try:
if self.ssh_conn:
self.ssh_conn.close()
except Exception as e:
text = str(e)
print self.name, ": terminate Exception:", text
print self.name, ": exit from host_thread"
def get_ip_from_dhcp(self):
now = time.time()
        next_iteration = now + 40000 # >10 hours
#print self.name, "Iteration"
for mac_address in self.mac_status:
if now < self.mac_status[mac_address]["next_reading"]:
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
continue
if self.mac_status[mac_address].get("active") == None:
#check from db if already active
self.db_lock.acquire()
r,c = self.db.get_table(FROM="ports as p join instances as i on p.instance_id=i.uuid",
WHERE={"p.mac": mac_address, "i.status": "ACTIVE"})
self.db_lock.release()
if r>0:
self.mac_status[mac_address]["active"] = now
self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
print self.name, "mac %s VM ACTIVE" % (mac_address)
self.mac_status[mac_address]["retries"] = 0
else:
#print self.name, "mac %s VM INACTIVE" % (mac_address)
if now - self.mac_status[mac_address]["created"] > 300:
#modify Database to tell openmano that we can not get dhcp from the machine
if not self.mac_status[mac_address].get("ip"):
self.db_lock.acquire()
r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
self.db_lock.release()
self.mac_status[mac_address]["ip"] = "0.0.0.0"
print self.name, "mac %s >> set to 0.0.0.0 because of timeout" % (mac_address)
self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
else:
self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
continue
if self.test:
if self.mac_status[mac_address]["retries"]>random.randint(10,100): #wait between 10 and 100 seconds to produce a fake IP
content = self.get_fake_ip()
else:
content = None
elif self.dhcp_params["host"]=="localhost":
try:
command = ['get_dhcp_lease.sh', mac_address]
content = subprocess.check_output(command)
except Exception as e:
text = str(e)
print self.name, ": get_ip_from_dhcp subprocess Exception", text
content = None
else:
try:
if not self.ssh_conn:
self.ssh_connect()
command = 'get_dhcp_lease.sh ' + mac_address
(_, stdout, _) = self.ssh_conn.exec_command(command)
content = stdout.read()
except paramiko.ssh_exception.SSHException as e:
text = e.args[0]
print self.name, ": get_ip_from_dhcp: ssh_Exception:", text
content = None
self.ssh_conn = None
except Exception as e:
text = str(e)
print self.name, ": get_ip_from_dhcp: Exception:", text
content = None
self.ssh_conn = None
if content:
self.mac_status[mac_address]["ip"] = content
#modify Database
self.db_lock.acquire()
r,c = self.db.update_rows("ports", {"ip_address": content}, {"mac": mac_address})
self.db_lock.release()
if r<0:
print self.name, ": Database update error:", c
else:
self.mac_status[mac_address]["retries"] = 0
                    self.mac_status[mac_address]["next_reading"] = (int(now)/3600 +1)* 36000 # 10 hours
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
print self.name, "mac %s >> %s" % (mac_address, content)
continue
            #a failure has happened
self.mac_status[mac_address]["retries"] +=1
            #next iteration is every 2 sec at the beginning, every 6 sec after the first minute, and every 1 min after 5 minutes
if now - self.mac_status[mac_address]["active"] > 120:
#modify Database to tell openmano that we can not get dhcp from the machine
if not self.mac_status[mac_address].get("ip"):
self.db_lock.acquire()
r,c = self.db.update_rows("ports", {"ip_address": "0.0.0.0"}, {"mac": mac_address})
self.db_lock.release()
self.mac_status[mac_address]["ip"] = "0.0.0.0"
print self.name, "mac %s >> set to 0.0.0.0 because of timeout" % (mac_address)
            if now - self.mac_status[mac_address]["active"] > 300:
                self.mac_status[mac_address]["next_reading"] = (int(now)/60 +1)* 60
            elif now - self.mac_status[mac_address]["active"] > 60:
                self.mac_status[mac_address]["next_reading"] = (int(now)/6 +1)* 6
            else:
                self.mac_status[mac_address]["next_reading"] = (int(now)/2 +1)* 2
if self.mac_status[mac_address]["next_reading"] < next_iteration:
next_iteration = self.mac_status[mac_address]["next_reading"]
return next_iteration
def get_fake_ip(self):
fake_ip= "192.168.%d.%d" % (random.randint(1,254), random.randint(1,254) )
while True:
#check not already provided
already_used = False
for mac_address in self.mac_status:
if self.mac_status[mac_address]["ip"] == fake_ip:
already_used = True
break
if not already_used:
return fake_ip
#EXAMPLE of bash script that must be available at the DHCP server for "isc-dhcp-server" type
# $ cat ./get_dhcp_lease.sh
# #!/bin/bash
# awk '
# ($1=="lease" && $3=="{"){ lease=$2; active="no"; found="no" }
# ($1=="binding" && $2=="state" && $3=="active;"){ active="yes" }
# ($1=="hardware" && $2=="ethernet" && $3==tolower("'$1';")){ found="yes" }
# ($1=="client-hostname"){ name=$2 }
# ($1=="}"){ if (active=="yes" && found=="yes"){ target_lease=lease; target_name=name}}
# END{printf("%s", target_lease)} #print target_name
# ' /var/lib/dhcp/dhcpd.leases
|
panaroo/post_run_alignment_gen.py | AMARTELKE/Pangenome-with-Panaroo | 116 | 12759839 | import shutil
import tempfile
import os
import networkx as nx
from .generate_output import *
from .isvalid import *
from .__init__ import __version__
def get_options():
import argparse
description = 'Generate multiple sequence alignments after running Panaroo'
parser = argparse.ArgumentParser(description=description,
prog='generate_panaroo_msa')
io_opts = parser.add_argument_group('Input/output')
io_opts.add_argument("-o",
"--out_dir",
dest="output_dir",
required=True,
help="location of the Panaroo output directory",
type=lambda x: is_valid_folder(parser, x))
# alignment
core = parser.add_argument_group('Gene alignment')
core.add_argument(
"-a",
"--alignment",
dest="aln",
        help=("Output alignments of core genes or all genes. Options are" +
              " 'core' and 'pan'. Default: 'core'"),
type=str,
choices={'core', 'pan'},
default='core')
core.add_argument(
"--aligner",
dest="alr",
help=
"Specify an aligner. Options:'prank', 'clustal', and default: 'mafft'",
type=str,
choices={'prank', 'clustal', 'mafft'},
default="mafft")
core.add_argument("--core_threshold",
dest="core",
help="Core-genome sample threshold (default=0.95)",
type=float,
default=0.95)
# Other options
parser.add_argument("-t",
"--threads",
dest="n_cpu",
help="number of threads to use (default=1)",
type=int,
default=1)
parser.add_argument("--verbose",
dest="verbose",
help="print additional output",
action='store_true',
default=False)
parser.add_argument('--version',
action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
return (args)
def main():
args = get_options()
# make sure trailing forward slash is present
args.output_dir = os.path.join(args.output_dir, "")
# Create temporary directory
temp_dir = os.path.join(tempfile.mkdtemp(dir=args.output_dir), "")
# Load isolate names
seen = set()
isolate_names = []
with open(args.output_dir + "gene_data.csv", 'r') as infile:
next(infile)
for line in infile:
iso = line.split(",")[0]
if iso not in seen:
isolate_names.append(iso)
seen.add(iso)
# Load graph
G = nx.read_gml(args.output_dir + "final_graph.gml")
#Write out core/pan-genome alignments
if args.aln == "pan":
if args.verbose: print("generating pan genome MSAs...")
generate_pan_genome_alignment(G, temp_dir, args.output_dir, args.n_cpu,
args.alr, isolate_names)
core_nodes = get_core_gene_nodes(G, args.core, len(isolate_names))
concatenate_core_genome_alignments(core_nodes, args.output_dir)
elif args.aln == "core":
if args.verbose: print("generating core genome MSAs...")
generate_core_genome_alignment(G, temp_dir, args.output_dir,
args.n_cpu, args.alr, isolate_names,
args.core, len(isolate_names))
# remove temporary directory
shutil.rmtree(temp_dir)
return
if __name__ == '__main__':
main()
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test84.py | YangHao666666/hawq | 450 | 12759842 |
'this crashed pychecker from calendar.py in Python 2.2'
class X:
'd'
def test(self, item):
return [e for e in item].__getslice__()
# this crashed in 2.2, but not 2.3
def f(a):
a.a = [x for x in range(2) if x > 1]
|
tests/mem.py | caplena/django-hashid-field | 310 | 12759861 | #!/usr/bin/env python
import os
import sys
import django
from memory_profiler import profile
@profile(precision=8)
def no_cache():
from hashid_field.hashid import Hashid
instances = [Hashid(i, salt="asdf", min_length=7) for i in range(1, 10_000)]
return instances
@profile(precision=8)
def with_cache():
from hashid_field.hashid import Hashid
from hashids import Hashids
hashids = Hashids(salt="asdf", min_length=7)
instances = [Hashid(i, hashids=hashids) for i in range(1, 10_000)]
return instances
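# The two profiled variants differ only in construction: no_cache() lets every
# Hashid build its own Hashids encoder from the salt, while with_cache() shares
# one pre-built Hashids instance, so the comparison is meant to isolate the
# per-object cost of the encoder setup.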
if __name__ == "__main__":
print("Python:", sys.version)
print("Django:", django.get_version(django.VERSION))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
no_cache()
with_cache()
|
DQMOffline/CalibCalo/python/MonitorAlCaEcalPi0_cfi.py | ckamtsikis/cmssw | 852 | 12759862 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
EcalPi0MonDQM = DQMEDAnalyzer('DQMSourcePi0',
prescaleFactor = cms.untracked.int32(1),
FolderName = cms.untracked.string('AlCaReco/EcalPi0'),
AlCaStreamEBpi0Tag = cms.untracked.InputTag("hltAlCaPi0RegRecHits","pi0EcalRecHitsEB"),
AlCaStreamEEpi0Tag = cms.untracked.InputTag("hltAlCaPi0RegRecHits","pi0EcalRecHitsEE"),
AlCaStreamEBetaTag = cms.untracked.InputTag("hltAlCaEtaRegRecHits","etaEcalRecHitsEB"),
AlCaStreamEEetaTag = cms.untracked.InputTag("hltAlCaEtaRegRecHits","etaEcalRecHitsEE"),
isMonEEpi0 = cms.untracked.bool(True),
isMonEBpi0 = cms.untracked.bool(True),
isMonEEeta = cms.untracked.bool(True),
isMonEBeta = cms.untracked.bool(True),
SaveToFile = cms.untracked.bool(False),
FileName = cms.untracked.string('MonitorAlCaEcalPi0.root'),
clusSeedThr = cms.double( 0.5 ),
clusSeedThrEndCap = cms.double( 1.0 ),
clusEtaSize = cms.int32( 3 ),
clusPhiSize = cms.int32( 3 ),
seleXtalMinEnergy = cms.double( -0.15 ),
seleXtalMinEnergyEndCap = cms.double( -0.75 ),
selePtGamma = cms.double(1 ),
selePtPi0 = cms.double( 2. ),
seleMinvMaxPi0 = cms.double( 0.22 ),
seleMinvMinPi0 = cms.double( 0.06 ),
seleS4S9Gamma = cms.double( 0.83 ),
selePi0Iso = cms.double( 0.5 ),
ptMinForIsolation = cms.double( 1 ),
selePi0BeltDR = cms.double( 0.2 ),
selePi0BeltDeta = cms.double( 0.05 ),
selePtGammaEndCap = cms.double( 0.8 ),
selePtPi0EndCap = cms.double( 3.0 ),
seleS4S9GammaEndCap = cms.double( 0.9 ),
seleMinvMaxPi0EndCap = cms.double( 0.3 ),
seleMinvMinPi0EndCap = cms.double( 0.05 ),
ptMinForIsolationEndCap = cms.double( 0.5 ),
selePi0IsoEndCap = cms.double( 0.5 ),
selePi0BeltDREndCap = cms.double( 0.2 ),
selePi0BeltDetaEndCap = cms.double( 0.05 ),
selePtGammaEta = cms.double(1.2),
selePtEta = cms.double(4.0),
seleS4S9GammaEta = cms.double(0.9),
seleS9S25GammaEta = cms.double(0.8),
seleMinvMaxEta = cms.double(0.8),
seleMinvMinEta = cms.double(0.3),
ptMinForIsolationEta = cms.double(1.0),
seleEtaIso = cms.double(0.5),
seleEtaBeltDR = cms.double(0.3),
seleEtaBeltDeta = cms.double(0.1),
massLowPi0Cand = cms.double(0.104),
massHighPi0Cand = cms.double(0.163),
selePtGammaEtaEndCap = cms.double(1.5),
selePtEtaEndCap = cms.double(5),
seleS4S9GammaEtaEndCap = cms.double(0.9),
seleS9S25GammaEtaEndCap = cms.double(0.85),
seleMinvMaxEtaEndCap = cms.double(0.8),
seleMinvMinEtaEndCap = cms.double(0.3),
ptMinForIsolationEtaEndCap = cms.double(0.5),
seleEtaIsoEndCap = cms.double(0.5),
seleEtaBeltDREndCap = cms.double(0.3),
seleEtaBeltDetaEndCap = cms.double(0.1),
posCalcParameters = cms.PSet( T0_barl = cms.double(5.7),
T0_endc = cms.double(3.1),
T0_endcPresh = cms.double(1.2),
LogWeighted = cms.bool(True),
W0 = cms.double(4.2),
X0 = cms.double(0.89)
)
)
|
python codes/rock-paper-scissor.py | mflilian/Hacktoberfest2020-1 | 266 | 12759921 |
import random
def rps(choice):
    if (choice==1):
        return "Rock"
    elif (choice==2):
        return "Paper"
    else:
        return "Scissor"
print("1. Rock 2.Paper 3.Scissor \n")
choice=int(input())
print("You "+rps(choice))
computer = random.randint(1,3)
print("Computer "+rps(computer))
print()
if (choice==computer):
print('Tie!')
elif (choice==1):
    if (computer==2):
        print("Computer wins!")
        print(rps(computer),"beats",rps(choice))
    elif (computer==3):
        print("You win!")
        print(rps(choice),"beats",rps(computer))
elif (choice==2):
    if (computer==3):
        print("Computer wins!")
        print(rps(computer),"beats",rps(choice))
    elif (computer==1):
        print("You win!")
        print(rps(choice),"beats",rps(computer))
elif (choice==3):
    if (computer==1):
        print("Computer wins!")
        print(rps(computer),"beats",rps(choice))
    elif (computer==2):
        print("You win!")
        print(rps(choice),"beats",rps(computer))
|
pytorchvideo/transforms/augmentations.py | kevinmtian/pytorchvideo | 2,391 | 12759928 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video transforms that are used for advanced augmentation methods."""
from typing import Any, Callable, Dict, Optional, Tuple
import torch
import torchvision
import torchvision.transforms.functional_tensor as F_t
from torchvision.transforms.functional import InterpolationMode
# Maximum global magnitude used for video augmentation.
_AUGMENTATION_MAX_LEVEL = 10
def _check_fill_arg(kwargs):
"""
Check if kwargs contains key ``fill``.
"""
assert "fill" in kwargs, "Need to have fill in kwargs."
def _autocontrast(video: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Maximize contrast of a video by remapping its pixels per channel so that the lowest
becomes black and the lightest becomes white.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""
return torchvision.transforms.functional.autocontrast(video)
def _equalize(video: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Equalize the histogram of a video by applying a non-linear mapping to the input in
order to create a uniform distribution of grayscale values in the output.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""
if video.dtype != torch.uint8:
video_type = video.dtype
video = (video * 255).to(torch.uint8)
return (torchvision.transforms.functional.equalize(video) / 255).to(video_type)
return torchvision.transforms.functional.equalize(video)
def _invert(video: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Invert the colors of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""
return torchvision.transforms.functional.invert(video)
def _rotate(video: torch.Tensor, factor: float, **kwargs) -> torch.Tensor:
"""
    Rotate the video by the given angle.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): The rotation angle value in degrees, counter-clockwise.
"""
_check_fill_arg(kwargs)
return torchvision.transforms.functional.rotate(
video, factor, fill=kwargs["fill"], interpolation=InterpolationMode.BILINEAR
)
def _solarize(video: torch.Tensor, factor: float, **kwargs) -> torch.Tensor:
"""
    Solarize a video by inverting all pixel values above a threshold.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""
if video.dtype == torch.uint8:
return torchvision.transforms.functional.solarize(video, int(factor * 255.0))
else:
return torchvision.transforms.functional.solarize(video, factor)
def _adjust_contrast(video: torch.Tensor, factor: float, **kwargs) -> torch.Tensor:
"""
    Adjust contrast of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the contrast. Can be any non-negative
number. 0 gives a solid gray video, 1 gives the original video while 2
increases the contrast by a factor of 2.
"""
return torchvision.transforms.functional.adjust_contrast(video, factor)
def _adjust_saturation(video: torch.Tensor, factor: float, **kwargs) -> torch.Tensor:
"""
Adjust the saturation of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the saturation. 0 will give a black and
white video, 1 will give the original video while 2 will enhance the
saturation by a factor of 2.
"""
return torchvision.transforms.functional.adjust_saturation(video, factor)
def _adjust_brightness(video: torch.Tensor, factor: float, **kwargs) -> torch.Tensor:
"""
Adjust brightness of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
        factor (float): How much to adjust the brightness. Can be any
            non-negative number. 0 gives a black video, 1 gives the original video
            while 2 increases the brightness by a factor of 2.
"""
return torchvision.transforms.functional.adjust_brightness(video, factor)
def _adjust_sharpness(video: torch.Tensor, factor: float, **kwargs) -> torch.Tensor:
"""
Adjust the sharpness of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the sharpness. Can be any non-negative
number. 0 gives a blurred video, 1 gives the original video while 2
increases the sharpness by a factor of 2.
"""
return torchvision.transforms.functional.adjust_sharpness(video, factor)
def _posterize(video: torch.Tensor, factor: float, **kwargs):
"""
    Posterize a video by reducing the number of bits for each color channel.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): The number of bits to keep for each channel (0-8).
"""
if factor >= 8:
return video
if video.dtype != torch.uint8:
video_type = video.dtype
video = (video * 255).to(torch.uint8)
return (torchvision.transforms.functional.posterize(video, factor) / 255).to(
video_type
)
return torchvision.transforms.functional.posterize(video, factor)
def _shear_x(video: torch.Tensor, factor: float, **kwargs):
"""
Shear the video along the horizontal axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to shear along the horizontal axis using the affine
matrix.
"""
_check_fill_arg(kwargs)
translation_offset = video.size(-2) * factor / 2
return F_t.affine(
video,
[1, factor, translation_offset, 0, 1, 0],
fill=kwargs["fill"],
interpolation="bilinear",
)
def _shear_y(video: torch.Tensor, factor: float, **kwargs):
"""
Shear the video along the vertical axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to shear along the vertical axis using the affine
matrix.
"""
_check_fill_arg(kwargs)
translation_offset = video.size(-1) * factor / 2
return F_t.affine(
video,
[1, 0, 0, factor, 1, translation_offset],
fill=kwargs["fill"],
interpolation="bilinear",
)
def _translate_x(video: torch.Tensor, factor: float, **kwargs):
"""
    Translate the video along the horizontal axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much (relative to the image size) to translate along the
            horizontal axis.
"""
_check_fill_arg(kwargs)
translation_offset = factor * video.size(-1)
return F_t.affine(
video,
[1, 0, translation_offset, 0, 1, 0],
fill=kwargs["fill"],
interpolation="bilinear",
)
def _translate_y(video: torch.Tensor, factor: float, **kwargs):
"""
Translate the video along the vertical axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much (relative to the image size) to translate along the
            vertical axis.
"""
_check_fill_arg(kwargs)
translation_offset = factor * video.size(-2)
return F_t.affine(
video,
[1, 0, 0, 0, 1, translation_offset],
fill=kwargs["fill"],
interpolation="bilinear",
)
def _randomly_negate(magnitude: float) -> float:
"""
Negate input value with 50% chance.
Args:
magnitude (float): Input value.
"""
return magnitude if torch.rand(1).item() > 0.5 else -magnitude
def _increasing_magnitude_to_arg(level: int, params: Tuple[float, float]) -> float:
"""
Convert level to transform magnitude. This assumes transform magnitude increases
linearly with level.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
            magnitude when level is 0; 2) Maximum increase in transform magnitude
            when level is at maximum.
"""
magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
return (params[0] + magnitude,)
def _increasing_randomly_negate_to_arg(
level: int, params: Tuple[float, float]
) -> Tuple[float]:
"""
Convert level to transform magnitude. This assumes transform magnitude increases
(or decreases with 50% chance) linearly with level.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
            magnitude when level is 0; 2) Maximum increase in transform magnitude
            when level is at maximum.
"""
magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
return (params[0] + _randomly_negate(magnitude),)
def _decreasing_int_to_arg(level: int, params: Tuple[int, int]) -> Tuple[int]:
"""
Convert level to transform magnitude. This assumes transform magnitude decreases
linearly with level. The return value is converted to int.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
            magnitude when level is 0; 2) Maximum decrease in transform magnitude
            when level is at maximum.
"""
magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
return (params[0] - int(magnitude),)
def _decreasing_to_arg(level: int, params: Tuple[float, float]) -> Tuple[float]:
"""
Convert level to transform magnitude. This assumes transform magnitude decreases
linearly with level.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
            magnitude when level is 0; 2) Maximum decrease in transform magnitude
            when level is at maximum.
"""
magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
return (params[0] - magnitude,)
# A dictionary that contains transform names (key) and their corresponding transform
# functions (value).
_NAME_TO_TRANSFORM_FUNC = {
"AdjustBrightness": _adjust_brightness,
"AdjustContrast": _adjust_contrast,
"AdjustSaturation": _adjust_saturation,
"AdjustSharpness": _adjust_sharpness,
"AutoContrast": _autocontrast,
"Equalize": _equalize,
"Invert": _invert,
"Rotate": _rotate,
"Posterize": _posterize,
"Solarize": _solarize,
"ShearX": _shear_x,
"ShearY": _shear_y,
"TranslateX": _translate_x,
"TranslateY": _translate_y,
}
# A dictionary that contains transform names (key) and their corresponding level
# functions (value), which converts the magnitude to the transform function arguments.
_LEVEL_TO_ARG = {
"AdjustBrightness": _increasing_randomly_negate_to_arg,
"AdjustContrast": _increasing_randomly_negate_to_arg,
"AdjustSaturation": _increasing_randomly_negate_to_arg,
"AdjustSharpness": _increasing_randomly_negate_to_arg,
"AutoContrast": None,
"Equalize": None,
"Invert": None,
"Rotate": _increasing_randomly_negate_to_arg,
"Posterize": _decreasing_int_to_arg,
"Solarize": _decreasing_to_arg,
"ShearX": _increasing_randomly_negate_to_arg,
"ShearY": _increasing_randomly_negate_to_arg,
"TranslateX": _increasing_randomly_negate_to_arg,
"TranslateY": _increasing_randomly_negate_to_arg,
}
# A dictionary that contains transform names (key) and their corresponding maximum
# transform (value).
_TRANSFORM_MAX_PARAMS = {
"AdjustBrightness": (1, 0.9),
"AdjustContrast": (1, 0.9),
"AdjustSaturation": (1, 0.9),
"AdjustSharpness": (1, 0.9),
"AutoContrast": None,
"Equalize": None,
"Invert": None,
"Rotate": (0, 30),
"Posterize": (4, 4),
"Solarize": (1, 1),
"ShearX": (0, 0.3),
"ShearY": (0, 0.3),
"TranslateX": (0, 0.45),
"TranslateY": (0, 0.45),
}
# Hyperparameters for sampling magnitude.
SAMPLING_DEFAULT_HPARAS = {"sampling_std": 0.5}
# Hyperparameters for transform functions.
TRANSFORM_DEFAULT_HPARAS = {"fill": (0.5, 0.5, 0.5)}
class AugmentTransform:
def __init__(
self,
transform_name: str,
magnitude: int = 10,
prob: float = 0.5,
name_to_transform_func: Optional[Dict[str, Callable]] = None,
level_to_arg: Optional[Dict[str, Callable]] = None,
transform_max_paras: Optional[Dict[str, Tuple]] = None,
transform_hparas: Optional[Dict[str, Any]] = None,
sampling_type: str = "gaussian",
sampling_hparas: Optional[Dict[str, Any]] = None,
) -> None:
"""
The AugmentTransform composes a video transform that performs augmentation
based on a maximum magnitude. AugmentTransform also offers flexible ways to
generate augmentation magnitude based on different sampling strategies.
Args:
transform_name (str): The name of the video transform function.
magnitude (int): Magnitude used for transform function.
            prob (float): The probability of applying each transform function.
name_to_transform_func (Optional[Dict[str, Callable]]): A Dictionary that
contains mapping of the transform name to the transform function.
level_to_arg (Optional[Dict[str, Callable]]): A Dictionary that contains
mapping of the transform name to its level function, which converts
the the magnitude to the transform function arguments.
transform_max_paras (Optional[Dict[str, Tuple]]): A Dictionary that
contains mapping of the transform name to its maximum transform
magnitude.
transform_hparas (Optional[Dict[Any]]): Transform hyper parameters.
Needs to have key fill. By default, it uses transform_default_hparas.
sampling_type (str): Sampling method for magnitude of transform. It should
be either gaussian or uniform.
sampling_hparas (Optional[Dict[Any]]): Hyper parameters for sampling. If
gaussian sampling is used, it needs to have key sampling_std. By
default, it uses transform_default_hparas.
"""
assert sampling_type in ["gaussian", "uniform"]
name_to_transform_func = name_to_transform_func or _NAME_TO_TRANSFORM_FUNC
level_to_arg = level_to_arg or _LEVEL_TO_ARG
transform_max_paras = transform_max_paras or _TRANSFORM_MAX_PARAMS
self.transform_hparas = transform_hparas or TRANSFORM_DEFAULT_HPARAS
self.sampling_type = sampling_type
self.sampling_hparas = sampling_hparas or SAMPLING_DEFAULT_HPARAS
assert "fill" in self.transform_hparas
if self.sampling_type == "gaussian":
assert "sampling_std" in self.sampling_hparas
if self.sampling_type == "uniform":
assert "sampling_data_type" in self.sampling_hparas
assert "sampling_min" in self.sampling_hparas
if self.sampling_hparas["sampling_data_type"] == "int":
assert isinstance(self.sampling_hparas["sampling_min"], int)
elif self.sampling_hparas["sampling_data_type"] == "float":
assert isinstance(self.sampling_hparas["sampling_min"], (int, float))
assert transform_name in name_to_transform_func
self.max_level = _AUGMENTATION_MAX_LEVEL
self.transform_name = transform_name
self.magnitude = magnitude
self.transform_fn = name_to_transform_func[transform_name]
self.level_fn = level_to_arg[transform_name]
self.level_paras = transform_max_paras[transform_name]
self.prob = prob
self.sampling_type = sampling_type
def _get_magnitude(self) -> float:
"""
Get magnitude based on sampling type.
"""
if self.sampling_type == "gaussian":
return max(
0,
min(
self.max_level,
torch.normal(
self.magnitude, self.sampling_hparas["sampling_std"], size=(1,)
).item(),
),
)
elif self.sampling_type == "uniform":
if self.sampling_hparas["sampling_data_type"] == "int":
return torch.randint(
self.sampling_hparas["sampling_min"], self.magnitude + 1, size=(1,)
).item()
elif self.sampling_hparas["sampling_data_type"] == "float":
return (
torch.rand(size=(1,)).item()
* (self.magnitude - self.sampling_hparas["sampling_min"])
+ self.sampling_hparas["sampling_min"]
)
else:
raise ValueError("sampling_data_type must be either 'int' or 'float'")
else:
raise NotImplementedError
def __call__(self, video: torch.Tensor) -> torch.Tensor:
"""
The input is a video tensor.
Args:
video (torch.Tensor): Input video tensor with shape (T, C, H, W).
"""
if torch.rand(1).item() > self.prob:
return video
magnitude = self._get_magnitude()
level_args = (
self.level_fn(magnitude, self.level_paras)
if self.level_fn is not None
else ()
)
return self.transform_fn(video, *level_args, **self.transform_hparas)
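# Usage sketch (illustrative only; the tensor below is random data, and the
# magnitude/probability values are example assumptions): apply a ShearX
# augmentation with probability 0.5 and a gaussian-sampled magnitude centred on 7.
#
#   transform = AugmentTransform("ShearX", magnitude=7, prob=0.5)
#   video = torch.rand(8, 3, 224, 224)  # (T, C, H, W)
#   augmented = transform(video)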
|
applications/GeoMechanicsApplication/python_scripts/gap_closure_interface_activation_process.py | lkusch/Kratos | 778 | 12759960 | import KratosMultiphysics
import KratosMultiphysics.GeoMechanicsApplication as KratosGeo
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return GapClosureInterfaceActivationProcess(Model, settings["Parameters"])
## All the python processes should be derived from "python_process"
class GapClosureInterfaceActivationProcess(KratosMultiphysics.Process):
def __init__(self, Model, settings ):
KratosMultiphysics.Process.__init__(self)
model_part = Model[settings["model_part_name"].GetString()]
params = KratosMultiphysics.Parameters("{}")
params.AddValue("model_part_name",settings["model_part_name"])
params.AddValue("gap_width_threshold",settings["gap_width_threshold"])
params.AddValue("consider_gap_closure",settings["consider_gap_closure"])
self.process = KratosGeo.GapClosureInterfaceProcess(model_part, params)
def ExecuteInitialize(self):
self.process.ExecuteInitialize()
def ExecuteInitializeSolutionStep(self):
self.process.ExecuteInitializeSolutionStep()
def ExecuteFinalizeSolutionStep(self):
self.process.ExecuteFinalizeSolutionStep()
def ExecuteFinalize(self):
self.process.ExecuteFinalize()
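# Illustrative sketch only (not part of the original file): how this process is
# normally instantiated through Factory() above. The model part name and the
# parameter values below are hypothetical; in a real analysis the "Parameters"
# block comes from the processes list of ProjectParameters.json and the model
# part is built and populated by the GeoMechanics solver beforehand.
if __name__ == "__main__":
    model = KratosMultiphysics.Model()
    model.CreateModelPart("InterfacePart")
    settings = KratosMultiphysics.Parameters("""{
        "Parameters": {
            "model_part_name":      "InterfacePart",
            "gap_width_threshold":  0.01,
            "consider_gap_closure": true
        }
    }""")
    # Constructing the process needs a fully prepared interface model part,
    # so the call is only shown here, not executed:
    # process = Factory(settings, model)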
|
tests/integration/s2n_handshake_test_old_s_client.py | bryce-shang/s2n-tls | 4,256 | 12759968 | #
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Handshake tests using Openssl 0.9.8 s_client against s2nd
"""
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
import threading
import uuid
import re
import string
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
from time import sleep
S_CLIENT_NEGOTIATED_CIPHER_PREFIX="Cipher : "
PROTO_VERS_TO_S_CLIENT_ARG = {
S2N_TLS10 : "-tls1",
S2N_TLS11 : "-tls1_1",
S2N_TLS12 : "-tls1_2",
}
use_corked_io=False
def cleanup_processes(*processes):
for p in processes:
p.kill()
p.wait()
def validate_version(expected_version, output):
for line in output.splitlines():
if ACTUAL_VERSION_STR.format(expected_version or S2N_TLS10) in line:
return 0
return -1
def validate_data_transfer(expected_data, s_client_out, s2nd_out):
"""
    Verify that the application data written between s_client and s2nd is encrypted and decrypted successfully.
"""
found = 0
for line in s2nd_out.splitlines():
if expected_data in line:
found = 1
break
if found == 0:
print ("Did not find " + expected_data + " in output from s2nd")
return -1
found = 0
for line in s_client_out.splitlines():
if expected_data in line:
found = 1
break
if found == 0:
print ("Did not find " + expected_data + " in output from s_client")
return -1
return 0
def find_expected_cipher(expected_cipher, s_client_out):
"""
Make sure s_client and s2nd negotiate the cipher suite we expect
"""
    full_expected_string = S_CLIENT_NEGOTIATED_CIPHER_PREFIX + expected_cipher
    for line in s_client_out.splitlines():
        if full_expected_string in line:
            return 0
print("Failed to find " + expected_cipher + " in s_client output")
return -1
def read_process_output_until(process, marker):
output = ""
while True:
line = process.stdout.readline().decode("utf-8")
output += line
if marker in line:
return output
return output
def try_handshake(endpoint, port, cipher, ssl_version, server_name=None, strict_hostname=False, server_cert=None, server_key=None,
server_cert_key_list=None, expected_server_cert=None, server_cipher_pref=None, ocsp=None, sig_algs=None, curves=None, resume=False, no_ticket=False,
prefer_low_latency=False, enter_fips_mode=False, client_auth=None, client_cert=DEFAULT_CLIENT_CERT_PATH,
client_key=DEFAULT_CLIENT_KEY_PATH, expected_cipher=None, expected_extensions=None):
"""
Attempt to handshake against s2nd listening on `endpoint` and `port` using Openssl s_client
:param int endpoint: endpoint for s2nd to listen on
:param int port: port for s2nd to listen on
:param str cipher: ciphers for Openssl s_client to offer. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
:param int ssl_version: SSL version for s_client to use
:param str server_name: server_name value for s_client to send
:param bool strict_hostname: whether s_client should strictly check to see if server certificate matches the server_name
:param str server_cert: path to certificate for s2nd to use
:param str server_key: path to private key for s2nd to use
:param list server_cert_key_list: a list of (cert_path, key_path) tuples for multicert tests.
    :param str expected_server_cert: Path to the expected server certificate that should be sent to s_client.
:param str ocsp: path to OCSP response file for stapling
:param str sig_algs: Signature algorithms for s_client to offer
:param str curves: Elliptic curves for s_client to offer
:param bool resume: True if s_client should try to reconnect to s2nd and reuse the same TLS session. False for normal negotiation.
:param bool no_ticket: True if s2n server should not use session ticket to resume the same TLS session.
:param bool prefer_low_latency: True if s2nd should use 1500 for max outgoing record size. False for default max.
:param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
:param bool client_auth: True if the test should try and use client authentication
:param str client_cert: Path to the client's cert file
:param str client_key: Path to the client's private key file
:param str expected_cipher: the cipher we expect to negotiate
:param list expected_extensions: list of expected extensions that s_client should receive.
:return: 0 on successfully negotiation(s), -1 on failure
"""
# Override certificate for ECDSA if unspecified. We can remove this when we
# support multiple certificates
if server_cert is None and server_cert_key_list is None and "ECDSA" in cipher:
server_cert = TEST_ECDSA_CERT
server_key = TEST_ECDSA_KEY
# Fire up s2nd
s2nd_cmd = ["../../bin/s2nd"]
if server_cert is not None:
s2nd_cmd.extend(["--cert", server_cert])
if server_key is not None:
s2nd_cmd.extend(["--key", server_key])
if server_cert_key_list is not None:
for cert_key_path in server_cert_key_list:
cert_path = cert_key_path[0]
key_path = cert_key_path[1]
s2nd_cmd.extend(["--cert", cert_path])
s2nd_cmd.extend(["--key", key_path])
if ocsp is not None:
s2nd_cmd.extend(["--ocsp", ocsp])
if prefer_low_latency == True:
s2nd_cmd.append("--prefer-low-latency")
if client_auth is not None:
s2nd_cmd.append("-m")
s2nd_cmd.extend(["-t", client_cert])
if use_corked_io:
s2nd_cmd.append("-C")
s2nd_cmd.extend([str(endpoint), str(port)])
s2nd_ciphers = "test_all_tls12"
if server_cipher_pref is not None:
s2nd_ciphers = server_cipher_pref
if enter_fips_mode == True:
s2nd_ciphers = "test_all_fips"
s2nd_cmd.append("--enter-fips-mode")
s2nd_cmd.append("-c")
s2nd_cmd.append(s2nd_ciphers)
if no_ticket:
s2nd_cmd.append("-T")
s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Make sure s2nd has started
s2nd.stdout.readline()
s_client_cmd = ["openssl", "s_client", "-connect", str(endpoint) + ":" + str(port)]
if ssl_version is not None:
s_client_cmd.append(PROTO_VERS_TO_S_CLIENT_ARG[ssl_version])
if cipher is not None:
s_client_cmd.extend(["-cipher", cipher])
    # For verifying that s2nd sends the expected extensions
s_client_cmd.append("-tlsextdebug")
# Fire up s_client
s_client = subprocess.Popen(s_client_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
s_client_out = ""
s2nd_out = ""
openssl_connect_marker = "CONNECTED"
openssl_reconnect_marker = "drop connection and then reconnect"
end_of_msg_marker = "__end_of_msg__"
# Wait until openssl and s2n have finished the handshake and are connected to each other
s_client_out += read_process_output_until(s_client, openssl_connect_marker)
s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)
if resume == True:
for i in range(0,5):
# Wait for openssl to resume connection 5 times in a row, and verify resumption works.
s_client_out += read_process_output_until(s_client, openssl_reconnect_marker)
s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)
data_to_validate = cipher + " " + str(uuid.uuid4())
# Write the data to openssl towards s2n server
msg = (data_to_validate + "\n" + end_of_msg_marker + "\n\n").encode("utf-8")
s_client.stdin.write(msg)
s_client.stdin.flush()
# Write the data to s2n towards openssl client
s2nd.stdin.write(msg)
s2nd.stdin.flush()
# Wait for the Data transfer to complete between OpenSSL and s2n
s_client_out += read_process_output_until(s_client, end_of_msg_marker)
s2nd_out += read_process_output_until(s2nd, end_of_msg_marker)
cleanup_processes(s2nd, s_client)
if validate_data_transfer(data_to_validate, s_client_out, s2nd_out) != 0:
return -1
if validate_version(ssl_version, s2nd_out) != 0:
return -1
if resume is True:
if validate_resume(s2nd_out) != 0:
return -1
if ocsp is not None:
if validate_ocsp(s_client_out) != 0:
return -1
if expected_cipher is not None:
if find_expected_cipher(expected_cipher, s_client_out) != 0:
return -1
if strict_hostname is True:
if validate_hostname(s_client_out) != 0:
return -1
if expected_server_cert is not None:
if validate_selected_certificate(s_client_out, expected_server_cert) != 0:
return -1
if expected_extensions is not None:
for extension in expected_extensions:
if extension.s_client_validate(s_client_out) != 0:
return -1
return 0
def cert_path_to_str(cert_path):
# Converts a path to a cert into a string usable for printing to test output
# Example: "./test_certs/rsa_2048_sha256_client_cert.pem" => "RSA-2048-SHA256"
return '-'.join(cert_path[cert_path.rfind('/')+1:].split('_')[:3]).upper()
def print_result(result_prefix, return_code):
suffix = ""
if return_code == 0:
if sys.stdout.isatty():
suffix = "\033[32;1mPASSED\033[0m"
else:
suffix = "PASSED"
else:
if sys.stdout.isatty():
suffix = "\033[31;1mFAILED\033[0m"
else:
suffix ="FAILED"
print(result_prefix + suffix)
def create_thread_pool():
threadpool_size = multiprocessing.cpu_count() * 4 # Multiply by 4 to increase parallelization between integration tests
print("\tCreating ThreadPool of size: " + str(threadpool_size))
threadpool = ThreadPool(processes=threadpool_size)
return threadpool
def run_handshake_test(host, port, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, client_cert_path, client_key_path):
cipher_name = cipher.openssl_name
cipher_vers = cipher.min_tls_vers
# Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.1
if not cipher.openssl_1_1_1_compatible:
return 0
if ssl_version and ssl_version < cipher_vers:
return 0
client_cert_str=str(use_client_auth)
if (use_client_auth is not None) and (client_cert_path is not None):
client_cert_str = cert_path_to_str(client_cert_path)
ret = try_handshake(host, port, cipher_name, ssl_version, no_ticket=no_ticket, enter_fips_mode=fips_mode, client_auth=use_client_auth, client_cert=client_cert_path, client_key=client_key_path)
result_prefix = "Cipher: %-30s ClientCert: %-16s Vers: %-8s ... " % (cipher_name, client_cert_str, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
return ret
def handshake_test(host, port, test_ciphers, fips_mode, no_ticket=False, use_client_auth=None, use_client_cert=None, use_client_key=None):
"""
Basic handshake tests using all valid combinations of supported cipher suites and TLS versions.
"""
print("\n\tRunning handshake tests:")
failed = 0
for ssl_version in [S2N_TLS10, None]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
port_offset = 0
results = []
# Only test non ECC ciphers, openssl 0.9.8 has trouble with ECDHE.
# Only test 1.0/SSLv3 ciphers since 0.9.8 only supports those.
for cipher in filter(lambda x: "ECDHE" not in x.openssl_name and x.min_tls_vers < S2N_TLS11, test_ciphers):
async_result = run_handshake_test(host, port + port_offset, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, use_client_cert, use_client_key)
port_offset += 1
results.append(async_result)
for async_result in results:
if async_result != 0:
failed = 1
return failed
def main():
    global use_corked_io
parser = argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using Openssl s_client')
parser.add_argument('host', help='The host for s2nd to bind to')
parser.add_argument('port', type=int, help='The port for s2nd to bind to')
parser.add_argument('--use_corked_io', action='store_true', help='Turn corked IO on/off')
parser.add_argument('--libcrypto', default='openssl-1.1.1', choices=S2N_LIBCRYPTO_CHOICES,
help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
libcrypto version. Defaults to openssl-1.1.1.""")
args = parser.parse_args()
use_corked_io = args.use_corked_io
# Retrieve the test ciphers to use based on the libcrypto version s2n was built with
test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
host = args.host
port = args.port
libcrypto_version = args.libcrypto
fips_mode = False
if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
fips_mode = True
print("\nRunning s2nd in FIPS mode.")
print("\nRunning tests with: " + os.popen('openssl version').read())
if use_corked_io == True:
print("Corked IO is on")
failed = 0
failed += handshake_test(host, port, test_ciphers, fips_mode)
return failed
if __name__ == "__main__":
sys.exit(main())
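# Example invocations (illustrative; the host and port values are arbitrary):
#   python s2n_handshake_test_old_s_client.py 127.0.0.1 8888
#   python s2n_handshake_test_old_s_client.py 127.0.0.1 8888 --use_corked_io --libcrypto openssl-1.1.1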
|
examples/rest-api-python/src/list.py | drewfish/serverless-stack | 5,922 | 12759982 | <reponame>drewfish/serverless-stack<gh_stars>1000+
import json
from db.notes import getNotes
def main(event, context):
return {
"statusCode": 200,
"body": json.dumps(getNotes(), indent=2)
}
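# Illustrative local check (not part of the deployed handler): main() ignores
# the event and context, so an empty event and None suffice, assuming
# db.notes.getNotes() can run outside of Lambda.
if __name__ == "__main__":
    print(main({}, None))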
|
research/delf/delf/python/datasets/generic_dataset.py | NasTul/models | 82,518 | 12759993 | # Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for generic image dataset creation."""
import os
from delf.python.datasets import utils
class ImagesFromList():
"""A generic data loader that loads images from a list.
Supports images of different sizes.
"""
def __init__(self, root, image_paths, imsize=None, bounding_boxes=None,
loader=utils.default_loader):
"""ImagesFromList object initialization.
Args:
root: String, root directory path.
image_paths: List, relative image paths as strings.
imsize: Integer, defines the maximum size of longer image side.
bounding_boxes: List of (x1,y1,x2,y2) tuples to crop the query images.
loader: Callable, a function to load an image given its path.
Raises:
ValueError: Raised if `image_paths` list is empty.
"""
# List of the full image filenames.
images_filenames = [os.path.join(root, image_path) for image_path in
image_paths]
if not images_filenames:
raise ValueError("Dataset contains 0 images.")
self.root = root
self.images = image_paths
self.imsize = imsize
self.images_filenames = images_filenames
self.bounding_boxes = bounding_boxes
self.loader = loader
def __getitem__(self, index):
"""Called to load an image at the given `index`.
Args:
index: Integer, image index.
Returns:
image: Tensor, loaded image.
"""
path = self.images_filenames[index]
if self.bounding_boxes is not None:
img = self.loader(path, self.imsize, self.bounding_boxes[index])
else:
img = self.loader(path, self.imsize)
return img
def __len__(self):
"""Implements the built-in function len().
Returns:
len: Number of images in the dataset.
"""
return len(self.images_filenames)
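# Illustrative sketch only (not part of the original module): building the
# dataset over a couple of images. The directory, file names and bounding boxes
# below are hypothetical; utils.default_loader expects real image files on disk.
if __name__ == "__main__":
  dataset = ImagesFromList(
      root="/tmp/query_images",
      image_paths=["query1.jpg", "query2.jpg"],
      imsize=1024,
      bounding_boxes=[(10, 10, 200, 200), (0, 0, 150, 100)])
  print("Number of images: %d" % len(dataset))
  first_image = dataset[0]  # Loaded, cropped to its box and resized to imsize.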
|
f5/bigip/tm/sys/test/functional/test_sshd.py | nghia-tran/f5-common-python | 272 | 12760002 | <reponame>nghia-tran/f5-common-python<filename>f5/bigip/tm/sys/test/functional/test_sshd.py<gh_stars>100-1000
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
V11_SUPPORTED = ['11.5.4', '11.6.0', '11.6.1', '11.6.2']
V12_SUPPORTED = ['12.0.0', '12.1.0']
def setup_sshd_test(request, mgmt_root):
def teardown():
d.allow = ['ALL']
d.banner = 'disabled'
d.bannerText = ''
d.inactivityTimeout = 0
d.logLevel = 'info'
d.login = 'enabled'
if pytest.config.getoption('--release') in V12_SUPPORTED:
d.port = 22
d.update()
request.addfinalizer(teardown)
d = mgmt_root.tm.sys.sshd.load()
return d
@pytest.mark.skipif(pytest.config.getoption('--release') not in V11_SUPPORTED,
reason='Needs v11 TMOS to pass')
class TestSshd11(object):
def test_load(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
assert ssh1.allow == ssh2.allow
assert ssh1.banner == ssh2.banner
assert ssh1.inactivityTimeout == ssh2.inactivityTimeout
assert ssh1.logLevel == ssh2.logLevel
assert ssh1.login == ssh2.login
def test_update_allow(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.allow = ['192.168.1.1']
ssh1.update()
assert ['192.168.1.1'] == ssh1.allow
assert ['192.168.1.1'] != ssh2.allow
# Refresh
ssh2.refresh()
assert ['192.168.1.1'] == ssh2.allow
def test_update_banner(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
banners = ['enabled', 'disabled']
for banner in banners:
ssh1.banner = banner
ssh1.update()
assert banner == ssh1.banner
assert banner != ssh2.banner
# Refresh
ssh2.refresh()
assert banner == ssh2.banner
def test_update_bannerText(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.bannerText = 'foo banner'
ssh1.update()
assert 'foo banner' == ssh1.bannerText
assert not hasattr(ssh2, 'bannerText')
# Refresh
ssh2.refresh()
assert 'foo banner' == ssh2.bannerText
def test_update_inactivityTimeout(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.inactivityTimeout = 10
ssh1.update()
assert 10 == ssh1.inactivityTimeout
assert 10 != ssh2.inactivityTimeout
# Refresh
ssh2.refresh()
assert 10 == ssh2.inactivityTimeout
def test_update_logLevel(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
levels = ['debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal',
'info', 'quiet', 'verbose']
for level in levels:
ssh1.logLevel = level
ssh1.update()
assert level == ssh1.logLevel
assert level != ssh2.logLevel
# Refresh
ssh2.refresh()
assert level == ssh2.logLevel
def test_update_login(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
logins = ['disabled', 'enabled']
for login in logins:
ssh1.login = login
ssh1.update()
assert login == ssh1.login
assert login != ssh2.login
# Refresh
ssh2.refresh()
assert login == ssh2.login
@pytest.mark.skipif(pytest.config.getoption('--release') not in V12_SUPPORTED,
reason='Needs v12 TMOS to pass')
class TestSshd12(object):
def test_load(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
assert ssh1.allow == ssh2.allow
assert ssh1.banner == ssh2.banner
assert ssh1.inactivityTimeout == ssh2.inactivityTimeout
assert ssh1.logLevel == ssh2.logLevel
assert ssh1.login == ssh2.login
assert ssh1.port == ssh2.port
def test_update_allow(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.allow = ['192.168.1.1']
ssh1.update()
assert ['192.168.1.1'] == ssh1.allow
assert ['192.168.1.1'] != ssh2.allow
# Refresh
ssh2.refresh()
assert ['192.168.1.1'] == ssh2.allow
def test_update_banner(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
banners = ['enabled', 'disabled']
for banner in banners:
ssh1.banner = banner
ssh1.update()
assert banner == ssh1.banner
assert banner != ssh2.banner
# Refresh
ssh2.refresh()
assert banner == ssh2.banner
def test_update_bannerText(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.bannerText = 'foo banner'
ssh1.update()
assert 'foo banner' == ssh1.bannerText
assert not hasattr(ssh2, 'bannerText')
# Refresh
ssh2.refresh()
assert 'foo banner' == ssh2.bannerText
def test_update_inactivityTimeout(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.inactivityTimeout = 10
ssh1.update()
assert 10 == ssh1.inactivityTimeout
assert 10 != ssh2.inactivityTimeout
# Refresh
ssh2.refresh()
assert 10 == ssh2.inactivityTimeout
def test_update_logLevel(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
levels = ['debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal',
'info', 'quiet', 'verbose']
for level in levels:
ssh1.logLevel = level
ssh1.update()
assert level == ssh1.logLevel
assert level != ssh2.logLevel
# Refresh
ssh2.refresh()
assert level == ssh2.logLevel
def test_update_login(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
logins = ['disabled', 'enabled']
for login in logins:
ssh1.login = login
ssh1.update()
assert login == ssh1.login
assert login != ssh2.login
# Refresh
ssh2.refresh()
assert login == ssh2.login
def test_update_port(self, request, mgmt_root):
ssh1 = setup_sshd_test(request, mgmt_root)
ssh2 = setup_sshd_test(request, mgmt_root)
ssh1.port = 1234
ssh1.update()
assert 1234 == ssh1.port
assert 1234 != ssh2.port
# Refresh
ssh2.refresh()
assert 1234 == ssh2.port
|
rigl/experimental/jax/prune_test.py | vishalbelsare/rigl | 276 | 12760015 | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.prune."""
import glob
from os import path
from absl.testing import absltest
from absl.testing import flagsaver
from rigl.experimental.jax import prune
class PruneTest(absltest.TestCase):
def test_prune_fixed_schedule(self):
"""Tests training/pruning driver with a fixed global sparsity."""
experiment_dir = self.create_tempdir().full_path
eval_flags = dict(
epochs=1,
pruning_rate=0.95,
experiment_dir=experiment_dir,
)
with flagsaver.flagsaver(**eval_flags):
prune.main([])
outfile = path.join(experiment_dir, '*', 'events.out.tfevents.*')
files = glob.glob(outfile)
self.assertTrue(len(files) == 1 and path.exists(files[0]))
def test_prune_global_pruning_schedule(self):
"""Tests training/pruning driver with a global sparsity schedule."""
experiment_dir = self.create_tempdir().full_path
eval_flags = dict(
epochs=10,
pruning_schedule='[(5, 0.33), (7, 0.66), (9, 0.95)]',
experiment_dir=experiment_dir,
)
with flagsaver.flagsaver(**eval_flags):
prune.main([])
outfile = path.join(experiment_dir, '*', 'events.out.tfevents.*')
files = glob.glob(outfile)
self.assertTrue(len(files) == 1 and path.exists(files[0]))
def test_prune_local_pruning_schedule(self):
"""Tests training/pruning driver with a single layer sparsity schedule."""
experiment_dir = self.create_tempdir().full_path
eval_flags = dict(
epochs=10,
pruning_schedule='{1:[(5, 0.33), (7, 0.66), (9, 0.95)]}',
experiment_dir=experiment_dir,
)
with flagsaver.flagsaver(**eval_flags):
prune.main([])
outfile = path.join(experiment_dir, '*', 'events.out.tfevents.*')
files = glob.glob(outfile)
self.assertTrue(len(files) == 1 and path.exists(files[0]))
if __name__ == '__main__':
absltest.main()
|
course/admin.py | khanhduy8/relate | 284 | 12760024 | <filename>course/admin.py
__copyright__ = "Copyright (C) 2014 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.utils.translation import (
gettext_lazy as _, pgettext)
from django.contrib import admin
from course.models import (
Course, Event,
ParticipationTag,
Participation, ParticipationPermission,
ParticipationRole, ParticipationRolePermission,
ParticipationPreapproval,
AuthenticationToken,
InstantFlowRequest,
FlowSession, FlowPageData,
FlowPageVisit, FlowPageVisitGrade,
FlowRuleException,
GradingOpportunity, GradeChange, InstantMessage,
Exam, ExamTicket)
from django import forms
from relate.utils import string_concat
from course.enrollment import (approve_enrollment, deny_enrollment)
from course.constants import (
participation_permission as pperm,
exam_ticket_states
)
from typing import Any, Text, Tuple # noqa
# {{{ permission helpers
def _filter_courses_for_user(queryset, user):
if user.is_superuser:
return queryset
z = queryset.filter(
participations__user=user,
participations__roles__permissions__permission=pperm.use_admin_interface)
return z
def _filter_course_linked_obj_for_user(queryset, user):
if user.is_superuser:
return queryset
return queryset.filter(
course__participations__user=user,
course__participations__roles__permissions__permission # noqa
=pperm.use_admin_interface
)
def _filter_participation_linked_obj_for_user(queryset, user):
if user.is_superuser:
return queryset
return queryset.filter(
participation__course__participations__user=user,
participation__course__participations__roles__permissions__permission # noqa
=pperm.use_admin_interface)
# }}}
# {{{ list filter helper
def _filter_related_only(filter_arg: str) -> Tuple[str, Any]:
return (filter_arg, admin.RelatedOnlyFieldListFilter)
# }}}
# {{{ course
class UnsafePasswordInput(forms.TextInput):
# This sends passwords back to the user--not ideal, but OK for the XMPP
# password.
input_type = "password"
class CourseAdminForm(forms.ModelForm):
class Meta:
model = Course
widgets = {
"course_xmpp_password": UnsafePasswordInput
}
exclude = ()
class CourseAdmin(admin.ModelAdmin):
list_display = (
"identifier",
"number",
"name",
"time_period",
"start_date",
"end_date",
"hidden",
"listed",
"accepts_enrollment")
list_editable = (
"number",
"name",
"time_period",
"start_date",
"end_date",
"hidden",
"listed",
"accepts_enrollment")
list_filter = (
"number",
"time_period",
"hidden",
"listed",
"accepts_enrollment")
date_hierarchy = "start_date"
search_fields = (
"identifier",
"number",
"name",
"time_period")
form = CourseAdminForm
save_on_top = True
# {{{ permissions
def has_add_permission(self, request):
# These are created only through the course creation form.
return False
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_courses_for_user(qs, request.user)
# }}}
admin.site.register(Course, CourseAdmin)
# }}}
# {{{ events
class EventAdmin(admin.ModelAdmin):
list_display = (
"course",
"kind",
"ordinal",
"time",
"end_time",
"shown_in_calendar")
list_filter = (_filter_related_only("course"), "kind", "shown_in_calendar")
date_hierarchy = "time"
search_fields = (
"course__identifier",
"kind",
)
def __unicode__(self): # pragma: no cover # not used
return "{}{} in {}".format(
self.kind,
" (%s)" % str(self.ordinal) if self.ordinal is not None else "",
self.course)
__str__ = __unicode__
list_editable = ("ordinal", "time", "end_time", "shown_in_calendar")
# {{{ permissions
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_course_linked_obj_for_user(qs, request.user)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(Event, EventAdmin)
# }}}
# {{{ participation tags
class ParticipationTagAdmin(admin.ModelAdmin):
list_filter = (_filter_related_only("course"),)
# {{{ permissions
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_course_linked_obj_for_user(qs, request.user)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(ParticipationTag, ParticipationTagAdmin)
# }}}
# {{{ participations
class ParticipationRolePermissionInline(admin.TabularInline):
model = ParticipationRolePermission
extra = 3
class ParticipationRoleAdmin(admin.ModelAdmin):
inlines = (ParticipationRolePermissionInline,)
list_filter = (_filter_related_only("course"), "identifier")
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return _filter_course_linked_obj_for_user(qs, request.user)
admin.site.register(ParticipationRole, ParticipationRoleAdmin)
class ParticipationPermissionInline(admin.TabularInline):
model = ParticipationPermission
extra = 3
class ParticipationForm(forms.ModelForm):
class Meta:
model = Participation
exclude = ("role",)
def clean(self):
super().clean()
for tag in self.cleaned_data.get("tags", []):
if tag.course != self.cleaned_data.get("course"):
from django.core.exceptions import ValidationError
raise ValidationError(
{"tags": _("Tags must belong to same course as "
"participation.")})
for role in self.cleaned_data.get("roles", []):
if role.course != self.cleaned_data.get("course"):
from django.core.exceptions import ValidationError
raise ValidationError(
{"roles": _("Role must belong to same course as "
"participation.")})
class ParticipationAdmin(admin.ModelAdmin):
form = ParticipationForm
def get_roles(self, obj):
return ", ".join(str(role.name) for role in obj.roles.all())
get_roles.short_description = _("Roles") # type: ignore
def get_tags(self, obj):
return ", ".join(str(tag.name) for tag in obj.tags.all())
get_tags.short_description = _("Tags") # type: ignore
    # Fixme: This can be misleading when a non-superuser clicks on the
    # link of a user who also attends other courses.
def get_user(self, obj):
from django.urls import reverse
from django.conf import settings
from django.utils.html import mark_safe
return mark_safe(string_concat(
"<a href='%(link)s'>", "%(user_fullname)s",
"</a>"
) % {
"link": reverse(
"admin:%s_change"
% settings.AUTH_USER_MODEL.replace(".", "_")
.lower(),
args=(obj.user.id,)),
"user_fullname": obj.user.get_full_name(
force_verbose_blank=True),
})
get_user.short_description = pgettext("real name of a user", "Name") # type:ignore # noqa
get_user.admin_order_field = "user__last_name" # type: ignore
get_user.allow_tags = True # type: ignore
list_display = (
"user",
"get_user",
"course",
"get_roles",
"status",
"get_tags",
)
def get_list_filter(self, request):
if request is not None and request.user.is_superuser:
return ("course",
"roles__name",
"status",
"tags")
return (_filter_related_only("course"),
_filter_related_only("roles"),
"status",
_filter_related_only("tags"))
raw_id_fields = ("user",)
filter_horizontal = ("tags", "roles",)
search_fields = (
"course__identifier",
"user__username",
"user__first_name",
"user__last_name",
)
actions = [approve_enrollment, deny_enrollment]
inlines = (ParticipationPermissionInline,)
save_on_top = True
# {{{ permissions
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_course_linked_obj_for_user(qs, request.user)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
        # Fixme: This branch seems to be unreachable
if db_field.name == "tags":
kwargs["queryset"] = _filter_course_linked_obj_for_user(
ParticipationTag.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(Participation, ParticipationAdmin)
class ParticipationPreapprovalAdmin(admin.ModelAdmin):
def get_roles(self, obj):
return ", ".join(str(role.name) for role in obj.roles.all())
get_roles.short_description = _("Roles") # type: ignore
list_display = ("email", "institutional_id", "course", "get_roles",
"creation_time", "creator")
list_filter = (_filter_related_only("course"), _filter_related_only("roles"))
search_fields = (
"email", "institutional_id",
)
# {{{ permissions
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return _filter_course_linked_obj_for_user(qs, request.user)
exclude = ("creator", "creation_time", "role")
def save_model(self, request, obj, form, change):
obj.creator = request.user
obj.save()
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(ParticipationPreapproval, ParticipationPreapprovalAdmin)
# }}}
class AuthenticationTokenAdmin(admin.ModelAdmin):
list_display = ("id", "participation", "restrict_to_participation_role",
"description", "valid_until", "revocation_time")
date_hierarchy = "creation_time"
search_fields = (
"id", "description", "participation__user__username"
)
admin.site.register(AuthenticationToken, AuthenticationTokenAdmin)
class InstantFlowRequestAdmin(admin.ModelAdmin):
list_display = ("course", "flow_id", "start_time", "end_time", "cancelled")
list_filter = (_filter_related_only("course"),)
date_hierarchy = "start_time"
search_fields = (
"email",
)
admin.site.register(InstantFlowRequest, InstantFlowRequestAdmin)
# {{{ flow sessions
class FlowPageDataInline(admin.TabularInline):
model = FlowPageData
extra = 0
class FlowSessionAdmin(admin.ModelAdmin):
def get_participant(self, obj):
if obj.participation is None:
return None
return obj.participation.user
get_participant.short_description = _("Participant") # type: ignore
get_participant.admin_order_field = "participation__user" # type: ignore
search_fields = (
"=id",
"flow_id",
"access_rules_tag",
"participation__user__username",
"participation__user__first_name",
"participation__user__last_name",
"user__username",
"user__first_name",
"user__last_name",
)
list_display = (
"id",
"flow_id",
"get_participant",
"course",
"start_time",
"completion_time",
"access_rules_tag",
"in_progress",
#"expiration_mode",
)
list_display_links = (
"flow_id",
"get_participant",
)
date_hierarchy = "start_time"
list_filter = (
_filter_related_only("course"),
"flow_id",
"in_progress",
"access_rules_tag",
"expiration_mode",
)
inlines = (FlowPageDataInline,)
raw_id_fields = ("participation", "user")
save_on_top = True
# {{{ permissions
def has_add_permission(self, request):
# These are only created automatically.
return False
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_course_linked_obj_for_user(qs, request.user)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(FlowSession, FlowSessionAdmin)
# }}}
# {{{ flow page visit
class FlowPageVisitGradeInline(admin.TabularInline):
model = FlowPageVisitGrade
extra = 0
class HasAnswerListFilter(admin.SimpleListFilter):
title = "has answer"
parameter_name = "has_answer"
def lookups(self, request, model_admin):
return (
("y", _("Yes")),
("n", _("No")),
)
def queryset(self, request, queryset):
if self.value() is None:
return queryset
return queryset.filter(answer__isnull=self.value() != "y")
class FlowIdListFilter(admin.SimpleListFilter):
"""
    This is only necessary because flow_id is only accessible via FlowSession,
    which is a ForeignKey on the model.
"""
title = _("Flow ID")
parameter_name = "flow_id"
def lookups(self, request, model_admin):
qs = model_admin.get_queryset(request)
if not request.user.is_superuser:
qs = qs.filter(
flow_session__course__participations__user=request.user,
flow_session__course__participations__roles__permissions__permission # noqa
=pperm.use_admin_interface)
flow_ids = qs.values_list("flow_session__flow_id", flat=True).distinct()
return zip(flow_ids, flow_ids)
def queryset(self, request, queryset):
if self.value():
return queryset.filter(flow_session__flow_id=self.value())
else:
return queryset
class FlowPageVisitAdmin(admin.ModelAdmin):
def get_course(self, obj):
return obj.flow_session.course
get_course.short_description = _("Course") # type: ignore
get_course.admin_order_field = "flow_session__course" # type: ignore
def get_flow_id(self, obj):
return obj.flow_session.flow_id
get_flow_id.short_description = _("Flow ID") # type: ignore
get_flow_id.admin_order_field = "flow_session__flow_id" # type: ignore
def get_page_id(self, obj):
if obj.page_data.page_ordinal is None:
return string_concat("%s/%s (", _("not in use"), ")") % (
obj.page_data.group_id,
obj.page_data.page_id)
else:
return "{}/{} ({})".format(
obj.page_data.group_id,
obj.page_data.page_id,
obj.page_data.page_ordinal)
get_page_id.short_description = _("Page ID") # type: ignore
get_page_id.admin_order_field = "page_data__page_id" # type: ignore
def get_participant(self, obj):
if obj.flow_session.participation:
return obj.flow_session.participation.user
else:
return string_concat("(", _("anonymous"), ")")
get_participant.short_description = _("Owner") # type: ignore
get_participant.admin_order_field = "flow_session__participation" # type: ignore
def get_answer_is_null(self, obj):
return obj.answer is not None
get_answer_is_null.short_description = _("Has answer") # type: ignore
get_answer_is_null.boolean = True # type: ignore
def get_flow_session_id(self, obj):
return obj.flow_session.id
get_flow_session_id.short_description = _("Flow Session ID") # type: ignore
get_flow_session_id.admin_order_field = "flow_session__id" # type: ignore
list_filter = (
HasAnswerListFilter,
"is_submitted_answer",
"is_synthetic",
_filter_related_only("flow_session__participation__course"),
FlowIdListFilter,
)
date_hierarchy = "visit_time"
list_display = (
"id",
"get_course",
"get_flow_id",
"get_page_id",
"get_participant",
"get_flow_session_id",
"visit_time",
"get_answer_is_null",
"is_submitted_answer",
"is_synthetic",
"user",
"impersonated_by",
)
list_display_links = (
"id",
)
search_fields = (
"=id",
"=flow_session__id",
"flow_session__flow_id",
"page_data__group_id",
"page_data__page_id",
"flow_session__participation__user__username",
"flow_session__participation__user__first_name",
"flow_session__participation__user__last_name",
)
raw_id_fields = ("flow_session", "page_data")
inlines = (FlowPageVisitGradeInline,)
save_on_top = True
# {{{ permissions
def has_add_permission(self, request):
# These are created only automatically.
return False
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(
flow_session__course__participations__user=request.user,
flow_session__course__participations__roles__permissions__permission # noqa
=pperm.use_admin_interface)
# }}}
admin.site.register(FlowPageVisit, FlowPageVisitAdmin)
# }}}
# {{{ flow access
class FlowRuleExceptionAdmin(admin.ModelAdmin):
def get_course(self, obj):
return obj.participation.course
get_course.short_description = _("Course") # type: ignore
get_course.admin_order_field = "participation__course" # type: ignore
def get_participant(self, obj):
return obj.participation.user
get_participant.short_description = _("Participant") # type: ignore
get_participant.admin_order_field = "participation__user" # type: ignore
ordering = ("-creation_time",)
search_fields = (
"flow_id",
"participation__user__username",
"participation__user__first_name",
"participation__user__last_name",
"comment",
)
list_display = (
"get_participant",
"get_course",
"flow_id",
"kind",
"expiration",
"creation_time",
)
list_display_links = (
"get_participant",
"flow_id",
)
list_filter = (
_filter_related_only("participation__course"),
"flow_id",
"kind",
)
date_hierarchy = "creation_time"
raw_id_fields = ("participation",)
# {{{ permissions
def has_add_permission(self, request):
# These are only created automatically.
return False
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_participation_linked_obj_for_user(qs, request.user)
exclude = ("creator", "creation_time")
def save_model(self, request, obj, form, change): # pragma: no cover
# This won't work since it's not allowed to add
obj.creator = request.user
obj.save()
# }}}
admin.site.register(FlowRuleException, FlowRuleExceptionAdmin)
# }}}
# {{{ grading
class GradingOpportunityAdmin(admin.ModelAdmin):
list_display = (
"name",
"course",
"identifier",
"due_time",
"shown_in_grade_book",
"shown_in_participant_grade_book",
)
list_filter = (
_filter_related_only("course"),
"shown_in_grade_book",
"shown_in_participant_grade_book",
)
list_editable = (
"shown_in_grade_book",
"shown_in_participant_grade_book",
)
# {{{ permissions
exclude = ("creation_time",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_course_linked_obj_for_user(qs, request.user)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(GradingOpportunity, GradingOpportunityAdmin)
class GradeChangeAdmin(admin.ModelAdmin):
def get_course(self, obj):
return obj.participation.course
get_course.short_description = _("Course") # type: ignore
get_course.admin_order_field = "participation__course" # type: ignore
def get_opportunity(self, obj):
return obj.opportunity.name
get_opportunity.short_description = _("Opportunity") # type: ignore
get_opportunity.admin_order_field = "opportunity" # type: ignore
def get_participant(self, obj):
return obj.participation.user
get_participant.short_description = _("Participant") # type: ignore
get_participant.admin_order_field = "participation__user" # type: ignore
def get_percentage(self, obj):
if obj.points is None or obj.max_points is None:
return None
else:
return round(100*obj.points/obj.max_points)
get_percentage.short_description = "%" # type: ignore
list_display = (
"get_opportunity",
"get_participant",
"get_course",
"state",
"points",
"max_points",
"get_percentage",
"attempt_id",
"grade_time",
)
list_display_links = (
"get_opportunity",
"get_participant",
)
date_hierarchy = "grade_time"
search_fields = (
"opportunity__name",
"opportunity__flow_id",
"opportunity__identifier",
"participation__user__username",
"participation__user__first_name",
"participation__user__last_name",
"attempt_id",
)
list_filter = (
_filter_related_only("opportunity__course"),
_filter_related_only("opportunity"),
"state",
)
raw_id_fields = ("participation", "flow_session", "opportunity")
# {{{ permission
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_participation_linked_obj_for_user(qs, request.user)
exclude = ("creator", "grade_time")
def save_model(self, request, obj, form, change):
obj.creator = request.user
obj.save()
# }}}
admin.site.register(GradeChange, GradeChangeAdmin)
# }}}
# {{{ instant message
class InstantMessageAdmin(admin.ModelAdmin):
def get_course(self, obj):
return obj.participation.course
get_course.short_description = _("Course") # type: ignore
get_course.admin_order_field = "participation__course" # type: ignore
def get_participant(self, obj):
return obj.participation.user
get_participant.short_description = _("Participant") # type: ignore
get_participant.admin_order_field = "participation__user" # type: ignore
list_filter = (_filter_related_only("participation__course"),)
list_display = (
"get_course",
"get_participant",
"time",
"text",
)
date_hierarchy = "time"
search_fields = (
"text",
"participation__user__username",
"participation__user__first_name",
"participation__user__last_name",
)
raw_id_fields = ("participation",)
# {{{ permissions
def has_add_permission(self, request):
# These are created only automatically.
return False
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_participation_linked_obj_for_user(qs, request.user)
# }}}
admin.site.register(InstantMessage, InstantMessageAdmin)
# }}}
# {{{ exam tickets
class ExamAdmin(admin.ModelAdmin):
list_filter = (
_filter_related_only("course"),
"active",
"listed",
)
list_display = (
"course",
"flow_id",
"active",
"listed",
"no_exams_before",
)
search_fields = (
"flow_id",
)
date_hierarchy = "no_exams_before"
# {{{ permissions
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_course_linked_obj_for_user(qs, request.user)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "course":
kwargs["queryset"] = _filter_courses_for_user(
Course.objects, request.user)
return super().formfield_for_foreignkey(
db_field, request, **kwargs)
# }}}
admin.site.register(Exam, ExamAdmin)
class ExamTicketAdmin(admin.ModelAdmin):
def get_course(self, obj):
return obj.participation.course
get_course.short_description = _("Course") # type: ignore
get_course.admin_order_field = "participation__course" # type: ignore
list_filter = (
_filter_related_only("participation__course"),
"state",
)
raw_id_fields = ("participation",)
list_display = (
"get_course",
"exam",
"participation",
"state",
"creation_time",
"usage_time",
)
date_hierarchy = "usage_time"
search_fields = (
"exam__course__identifier",
"exam__flow_id",
"exam__description",
"participation__user__username",
"participation__user__first_name",
"participation__user__last_name",
)
# {{{ permissions
def get_queryset(self, request):
qs = super().get_queryset(request)
return _filter_participation_linked_obj_for_user(qs, request.user)
exclude = ("creator",)
def save_model(self, request, obj, form, change):
obj.creator = request.user
obj.save()
# }}}
def revoke_exam_tickets(self, request, queryset): # noqa
queryset \
.filter(state=exam_ticket_states.valid) \
.update(state=exam_ticket_states.revoked)
revoke_exam_tickets.short_description = _("Revoke Exam Tickets") # type: ignore
actions = [revoke_exam_tickets]
admin.site.register(ExamTicket, ExamTicketAdmin)
# }}}
# vim: foldmethod=marker
|
pg_view/models/consumers.py | bocytko/pg_view | 402 | 12760078 | <filename>pg_view/models/consumers.py
import sys
if sys.hexversion >= 0x03000000:
from queue import Empty
else:
from Queue import Empty
class DiskCollectorConsumer(object):
""" consumes information from the disk collector and provides it for the local
collector classes running in the same subprocess.
"""
def __init__(self, q):
self.result = {}
self.cached_result = {}
self.q = q
def consume(self):
# if we haven't consumed the previous value
if len(self.result) != 0:
return
try:
self.result = self.q.get_nowait()
self.cached_result = self.result.copy()
except Empty:
# we are too fast, just do nothing.
pass
else:
self.q.task_done()
def fetch(self, wd):
data = None
if wd in self.result:
data = self.result[wd]
del self.result[wd]
elif wd in self.cached_result:
data = self.cached_result[wd]
return data
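# Illustrative sketch only (not part of the original module): feeding the
# consumer from a JoinableQueue the way the disk collector subprocess would.
# The device name and the numbers are made up.
if __name__ == '__main__':
    import multiprocessing
    import time
    queue = multiprocessing.JoinableQueue(1)
    queue.put({'sda': {'await': 1.2, 'util': 30.5}})
    time.sleep(0.1)  # give the queue's feeder thread time to deliver the item
    consumer = DiskCollectorConsumer(queue)
    consumer.consume()  # stores the queued dict and marks the task as done
    print(consumer.fetch('sda'))  # returns and removes the fresh entry
    print(consumer.fetch('sda'))  # falls back to the cached copy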
|
jsonpath_ng/bin/jsonpath.py | transfluxus/jsonpath-ng | 339 | 12760099 | <gh_stars>100-1000
#!/usr/bin/python
# encoding: utf-8
# Copyright © 2012 <NAME> <<EMAIL>>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
# Use modern Python
from __future__ import unicode_literals, print_function, absolute_import
# Standard Library imports
import json
import sys
import glob
import argparse
# JsonPath-RW imports
from jsonpath_ng import parse
def find_matches_for_file(expr, f):
return expr.find(json.load(f))
def print_matches(matches):
print('\n'.join(['{0}'.format(match.value) for match in matches]))
def main(*argv):
parser = argparse.ArgumentParser(
description='Search JSON files (or stdin) according to a JSONPath expression.',
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
Quick JSONPath reference (see more at https://github.com/kennknowles/python-jsonpath-rw)
atomics:
$ - root object
`this` - current object
operators:
path1.path2 - same as xpath /
path1|path2 - union
path1..path2 - somewhere in between
fields:
fieldname - field with name
* - any field
[_start_?:_end_?] - array slice
[*] - any array index
""")
parser.add_argument('expression', help='A JSONPath expression.')
parser.add_argument('files', metavar='file', nargs='*', help='Files to search (if none, searches stdin)')
args = parser.parse_args(argv[1:])
expr = parse(args.expression)
glob_patterns = args.files
if len(glob_patterns) == 0:
# stdin mode
print_matches(find_matches_for_file(expr, sys.stdin))
else:
# file paths mode
for pattern in glob_patterns:
for filename in glob.glob(pattern):
with open(filename) as f:
print_matches(find_matches_for_file(expr, f))
def entry_point():
main(*sys.argv)
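# Example invocations (illustrative; the expressions and file names are made up):
#   python jsonpath.py '$.store.book[*].author' inventory.json
#   cat inventory.json | python jsonpath.py '$..price'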
|
CondTools/BeamSpot/test/BeamSpotOnlineRecordsWriter_cfg.py | malbouis/cmssw | 852 | 12760102 | <reponame>malbouis/cmssw
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("write2DB")
options = VarParsing.VarParsing()
options.register('unitTest',
False, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.bool, # string, int, or float
"are we running the unit test?")
options.register('inputFile',
"BeamFitResults_Run306171.txt", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"location of the input data")
options.register('inputTag',
"myTagName", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"output tag name")
options.register('inputRecord',
"BeamSpotOnlineLegacyObjectsRcd", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"type of record")
options.register('startRun',
306171, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.int, # string, int, or float
"location of the input data")
options.register('startLumi',
497, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.int, # string, int, or float
"IOV Start Lumi")
options.parseArguments()
process.load("FWCore.MessageLogger.MessageLogger_cfi")
from CondCore.CondDB.CondDB_cfi import *
if options.unitTest :
if options.inputRecord == "BeamSpotOnlineLegacyObjectsRcd" :
tag_name = 'BSLegacy_tag'
else:
tag_name = 'BSHLT_tag'
else:
tag_name = options.inputTag
#################################
# Produce a SQLITE FILE
#################################
CondDBBeamSpotObjects = CondDB.clone(connect = cms.string('sqlite_file:test_%s.db' % tag_name)) # choose an output name
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
CondDBBeamSpotObjects,
timetype = cms.untracked.string('lumiid'), #('lumiid'), #('runnumber')
toPut = cms.VPSet(cms.PSet(record = cms.string(options.inputRecord), # BeamSpotOnline record
tag = cms.string(tag_name))), # choose your favourite tag
loadBlobStreamer = cms.untracked.bool(False)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.beamspotonlinewriter = cms.EDAnalyzer("BeamSpotOnlineRecordsWriter",
isHLT = cms.bool((options.inputRecord == "BeamSpotOnlineHLTObjectsRcd")),
InputFileName = cms.untracked.string(options.inputFile), # choose your input file
)
if(options.startRun>0 and options.startLumi>0):
process.beamspotonlinewriter.IOVStartRun = cms.untracked.uint32(options.startRun) # Customize your Run
process.beamspotonlinewriter.IOVStartLumi = cms.untracked.uint32(options.startLumi) # Customize your Lumi
process.p = cms.Path(process.beamspotonlinewriter)
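# Example cmsRun invocations (illustrative; the file, tag and IOV values are
# placeholders for a real payload):
#   cmsRun BeamSpotOnlineRecordsWriter_cfg.py unitTest=True
#   cmsRun BeamSpotOnlineRecordsWriter_cfg.py inputFile=BeamFitResults_Run306171.txt inputTag=myTagName inputRecord=BeamSpotOnlineHLTObjectsRcd startRun=306171 startLumi=497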
|
jmilkfansblog/tests/test_urls.py | xiaoyh121/program | 176 | 12760117 | import unittest
from jmilkfansblog.controllers import admin
from jmilkfansblog.controllers import rest_api
from jmilkfansblog import create_app
from jmilkfansblog.models import db
class TestURLs(unittest.TestCase):
"""Unit test for route functions."""
def setUp(self):
        # Destroy the Flask-Admin and Flask-Restful registries left over
        # after deleting the app object
admin._views = []
        rest_api.resources = []
app = create_app('jmilkfansblog.config.TestConfig')
self.client = app.test_client()
# Using Test app for db
db.app = app
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
if __name__ == '__main__':
unittest.main()
|
integration_tests/samples/issues/issue_522.py | priya1puresoftware/python-slack-sdk | 2,486 | 12760129 | # export SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN=<KEY>
# python3 integration_tests/samples/issues/issue_522.py
import asyncio
import logging
import os
import sys
from slack_sdk.rtm import RTMClient
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
token = os.environ["SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN"]
async def sleepy_count(name, sleep_for):
for i in range(10):
await asyncio.sleep(sleep_for)
LOGGER.debug(f"{name} - slept {i + 1} times.")
async def slack_client_and_sleeps():
# real-time-messaging Slack client
client = RTMClient(token=token, run_async=True)
sleepy_count_task = asyncio.create_task(sleepy_count("first counter", 1))
sleepy_count_task2 = asyncio.create_task(sleepy_count("second counter", 3))
await asyncio.gather(client.start(), sleepy_count_task, sleepy_count_task2)
async def slack_client():
# real-time-messaging Slack client
client = RTMClient(token=token, run_async=True)
await asyncio.gather(client.start())
async def sleeps():
sleepy_count_task = asyncio.create_task(sleepy_count("first counter", 1))
sleepy_count_task2 = asyncio.create_task(sleepy_count("second counter", 3))
await asyncio.gather(sleepy_count_task, sleepy_count_task2)
if __name__ == "__main__":
LOGGER.info(f"Try: kill -2 {os.getpid()} or ctrl+c")
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "1":
# sigint closes program correctly
asyncio.run(slack_client())
elif option == "2":
# sigint closes program correctly
asyncio.run(sleeps())
elif option == "3":
# sigint doesn't actually close properly
asyncio.run(slack_client_and_sleeps())
else:
# sigint doesn't actually close properly
asyncio.run(slack_client_and_sleeps())
|
src/mcedit2/panels/pending_imports.py | elcarrion06/mcedit2 | 673 | 12760143 | <reponame>elcarrion06/mcedit2<filename>src/mcedit2/panels/pending_imports.py<gh_stars>100-1000
"""
pending_imports
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from PySide import QtGui
import logging
log = logging.getLogger(__name__)
class PendingImportsWidget(QtGui.QWidget):
def __init__(self):
super(PendingImportsWidget, self).__init__()
self.importsListWidget = QtGui.QListView()
self.importsListModel = QtGui.QStandardItemModel()
self.importsListWidget.setModel(self.importsListModel)
self.importsListWidget.clicked.connect(self.listClicked)
self.importsListWidget.doubleClicked.connect(self.listDoubleClicked)
|
questionary/prompts/autocomplete.py | qualichat/questionary | 851 | 12760194 | <filename>questionary/prompts/autocomplete.py<gh_stars>100-1000
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
Iterable,
)
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts.prompt import PromptSession, CompleteStyle
from prompt_toolkit.styles import Style, merge_styles
from prompt_toolkit.lexers import SimpleLexer
from questionary.constants import DEFAULT_QUESTION_PREFIX, DEFAULT_STYLE
from questionary.prompts.common import build_validator
from questionary.question import Question
class WordCompleter(Completer):
choices_source: Union[List[str], Callable[[], List[str]]]
ignore_case: bool
meta_information: Dict[str, Any]
match_middle: bool
def __init__(
self,
choices: Union[List[str], Callable[[], List[str]]],
ignore_case: bool = True,
meta_information: Optional[Dict[str, Any]] = None,
match_middle: bool = True,
) -> None:
self.choices_source = choices
self.ignore_case = ignore_case
self.meta_information = meta_information or {}
self.match_middle = match_middle
def _choices(self) -> Iterable[str]:
return (
self.choices_source()
if callable(self.choices_source)
else self.choices_source
)
def _choice_matches(self, word_before_cursor: str, choice: str) -> int:
"""Match index if found, -1 if not. """
if self.ignore_case:
choice = choice.lower()
if self.match_middle:
return choice.find(word_before_cursor)
elif choice.startswith(word_before_cursor):
return 0
else:
return -1
@staticmethod
def _display_for_choice(choice: str, index: int, word_before_cursor: str) -> HTML:
return HTML("{}<b><u>{}</u></b>{}").format(
choice[:index],
choice[index : index + len(word_before_cursor)],
choice[index + len(word_before_cursor) : len(choice)],
)
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
choices = self._choices()
# Get word/text before cursor.
word_before_cursor = document.text_before_cursor
if self.ignore_case:
word_before_cursor = word_before_cursor.lower()
for choice in choices:
index = self._choice_matches(word_before_cursor, choice)
if index == -1:
# didn't find a match
continue
display_meta = self.meta_information.get(choice, "")
display = self._display_for_choice(choice, index, word_before_cursor)
yield Completion(
choice,
start_position=-len(choice),
display=display.formatted_text,
display_meta=display_meta,
style="class:answer",
selected_style="class:selected",
)
def autocomplete(
message: str,
choices: List[str],
default: str = "",
qmark: str = DEFAULT_QUESTION_PREFIX,
completer: Optional[Completer] = None,
meta_information: Optional[Dict[str, Any]] = None,
ignore_case: bool = True,
match_middle: bool = True,
complete_style: CompleteStyle = CompleteStyle.COLUMN,
validate: Any = None,
style: Optional[Style] = None,
**kwargs: Any,
) -> Question:
"""Prompt the user to enter a message with autocomplete help.
Example:
>>> import questionary
>>> questionary.autocomplete(
... 'Choose ant specie',
... choices=[
... 'Camponotus pennsylvanicus',
... 'Linepithema humile',
... 'Eciton burchellii',
... "Atta colombica",
... 'Polyergus lucidus',
... 'Polyergus rufescens',
... ]).ask()
? Choose ant specie Atta colombica
'Atta colombica'
.. image:: ../images/autocomplete.gif
This is just a really basic example, the prompt can be customised using the
parameters.
Args:
message: Question text
choices: Items shown in the selection, this contains items as strings
default: Default return value (single value).
qmark: Question prefix displayed in front of the question.
By default this is a ``?``
completer: A prompt_toolkit :class:`prompt_toolkit.completion.Completion`
implementation. If not set, a questionary completer implementation
will be used.
        meta_information: A dictionary mapping choices to extra descriptive
                          text shown next to them in the completion menu.
        ignore_case: If true, matching is case-insensitive.
        match_middle: If true, the input is matched anywhere inside a choice,
                      not only at its beginning.
        complete_style: How the autocomplete menu is shown; it can be ``COLUMN``,
                        ``MULTI_COLUMN`` or ``READLINE_LIKE`` from
                        :class:`prompt_toolkit.shortcuts.CompleteStyle`.
        validate: Require the entered value to pass a validation. The
                  value can not be submitted until the validator accepts
                  it (e.g. to check minimum password length).
                  This can either be a function accepting the input and
                  returning a boolean, or a class reference to a
                  subclass of the prompt toolkit Validator class.
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
:class:`Question`: Question instance, ready to be prompted (using ``.ask()``).
"""
merged_style = merge_styles([DEFAULT_STYLE, style])
def get_prompt_tokens() -> List[Tuple[str, str]]:
return [("class:qmark", qmark), ("class:question", " {} ".format(message))]
def get_meta_style(meta: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
if meta:
for key in meta:
meta[key] = HTML("<text>{}</text>").format(meta[key])
return meta
validator = build_validator(validate)
if completer is None:
if not choices:
            raise ValueError("No choices were given; use a Text question instead.")
# use the default completer
completer = WordCompleter(
choices,
ignore_case=ignore_case,
meta_information=get_meta_style(meta_information),
match_middle=match_middle,
)
p = PromptSession(
get_prompt_tokens,
lexer=SimpleLexer("class:answer"),
style=merged_style,
completer=completer,
validator=validator,
complete_style=complete_style,
**kwargs,
)
p.default_buffer.reset(Document(default))
return Question(p.app)
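# --- Hedged usage sketch (not part of the original module) --------------------
# Running this file directly prompts with a custom WordCompleter.  The choice
# list and meta text below are made-up examples, and an interactive terminal is
# required for the prompt to render.
if __name__ == "__main__":
    _demo_completer = WordCompleter(
        ["alpha", "beta", "gamma"],
        meta_information={"alpha": "first Greek letter"},
        match_middle=True,
    )
    _answer = autocomplete(
        "Pick a letter",
        choices=["alpha", "beta", "gamma"],
        completer=_demo_completer,
    ).ask()
    print(_answer)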
|
tests/integrational/native_sync/test_fetch_messages.py | natekspencer/pubnub-python | 146 | 12760200 | import time
from pubnub.models.consumer.history import PNFetchMessagesResult
from pubnub.models.consumer.pubsub import PNPublishResult
from pubnub.pubnub import PubNub
from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import use_cassette_and_stub_time_sleep_native
COUNT = 120
class TestFetchMessages:
@use_cassette_and_stub_time_sleep_native(
'tests/integrational/fixtures/native_sync/fetch_messages/max_100_single.yaml',
filter_query_parameters=['uuid', 'pnsdk', 'l_pub'])
def test_fetch_messages_return_max_100_for_single_channel(self):
ch = "fetch-messages-ch-1"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "fetch-messages-uuid"
for i in range(COUNT):
envelope = pubnub.publish().channel(ch).message("hey-%s" % i).sync()
assert isinstance(envelope.result, PNPublishResult)
assert envelope.result.timetoken > 0
while True:
time.sleep(1)
if len(pubnub.history().channel(ch).count(COUNT).sync().result.messages) >= 100:
break
envelope = pubnub.fetch_messages().channels(ch).sync()
assert envelope is not None
assert isinstance(envelope.result, PNFetchMessagesResult)
assert len(envelope.result.channels[ch]) == 100
@use_cassette_and_stub_time_sleep_native(
'tests/integrational/fixtures/native_sync/fetch_messages/max_25_multiple.yaml',
filter_query_parameters=['uuid', 'pnsdk', 'l_pub'])
def test_fetch_messages_return_max_25_for_multiple_channels(self):
ch1 = "fetch-messages-ch-1"
ch2 = "fetch-messages-ch-2"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "fetch-messages-uuid"
for i in range(COUNT):
envelope1 = pubnub.publish().channel(ch1).message("hey-%s" % i).sync()
assert isinstance(envelope1.result, PNPublishResult)
assert envelope1.result.timetoken > 0
envelope2 = pubnub.publish().channel(ch2).message("hey-%s" % i).sync()
assert isinstance(envelope2.result, PNPublishResult)
assert envelope2.result.timetoken > 0
while True:
time.sleep(1)
if len(pubnub.history().channel(ch1).count(COUNT).sync().result.messages) >= 100 and \
len(pubnub.history().channel(ch2).count(COUNT).sync().result.messages) >= 100:
break
envelope = pubnub.fetch_messages().channels([ch1, ch2]).sync()
assert isinstance(envelope.result, PNFetchMessagesResult)
assert len(envelope.result.channels[ch1]) == 25
assert len(envelope.result.channels[ch2]) == 25
@use_cassette_and_stub_time_sleep_native(
'tests/integrational/fixtures/native_sync/fetch_messages/max_25_with_actions.yaml',
filter_query_parameters=['uuid', 'pnsdk', 'l_pub'])
def test_fetch_messages_actions_return_max_25(self):
ch = "fetch-messages-actions-ch-1"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "fetch-messages-uuid"
for i in range(COUNT):
envelope = pubnub.publish().channel(ch).message("hey-%s" % i).sync()
assert isinstance(envelope.result, PNPublishResult)
assert envelope.result.timetoken > 0
while True:
time.sleep(1)
if len(pubnub.history().channel(ch).count(COUNT).sync().result.messages) >= 100:
break
envelope = pubnub.fetch_messages().channels(ch).include_message_actions(True).sync()
assert envelope is not None
assert isinstance(envelope.result, PNFetchMessagesResult)
assert len(envelope.result.channels[ch]) == 25
|
examples/ocaml_rockstar.py | hoojaoh/rockstar | 4,603 | 12760214 | from rockstar import RockStar
ocaml_code = 'print_string "Hello world!\n";;'
rock_it_bro = RockStar(days=400, file_name='hello.ml', code=ocaml_code)
rock_it_bro.make_me_a_rockstar()
|
POSITIONMANAGE/api_position.py | CJuanvip/quant-trading-system | 281 | 12760243 | import json
def api_position(db,cursor,temp,principal5,principal30,principal60,principal300,principal900,principal1800,coin_number5,coin_number30,coin_number60,coin_number300,coin_number900,coin_number1800,judge_position,sell_amount,buy_amount,current_price):
all_buyamount = 0
all_sellamount = 0
trade_amonut = {}
flee = 0.0025
for i in temp:
if(i == '5'):
trade_amonut['5'] = position(coin_number5,principal5,buy_amount,sell_amount,flee,judge_position,temp[i],current_price)
if(trade_amonut['5']['action'] == 'buy'):
principal5 = trade_amonut['5']['value']['principal']
coin_number5 = trade_amonut['5']['value']['coin_number']
all_buyamount += trade_amonut['5']['value']['buy_amount']
if(trade_amonut['5']['action'] == 'sell'):
principal5 = trade_amonut['5']['value']['principal']
coin_number5 = trade_amonut['5']['value']['coin_number']
all_sellamount += trade_amonut['5']['value']['sell_amount']
if(i == '30'):
trade_amonut['30'] = position(coin_number30,principal30,buy_amount,sell_amount,flee,judge_position,temp[i],current_price)
if (trade_amonut['30']['action'] == 'buy'):
principal30 = trade_amonut['30']['value']['principal']
coin_number30 = trade_amonut['30']['value']['coin_number']
all_buyamount += trade_amonut['30']['value']['buy_amount']
if (trade_amonut['30']['action'] == 'sell'):
principal30 = trade_amonut['30']['value']['principal']
coin_number30 = trade_amonut['30']['value']['coin_number']
all_sellamount += trade_amonut['30']['value']['sell_amount']
if (i == '60'):
trade_amonut['60'] = position(coin_number60,principal60,buy_amount,sell_amount,flee,judge_position,temp[i],current_price)
if (trade_amonut['60']['action'] == 'buy'):
principal60 = trade_amonut['60']['value']['principal']
coin_number60 = trade_amonut['60']['value']['coin_number']
all_buyamount += trade_amonut['60']['value']['buy_amount']
if (trade_amonut['60']['action'] == 'sell'):
principal60 = trade_amonut['60']['value']['principal']
coin_number60 = trade_amonut['60']['value']['coin_number']
all_sellamount += trade_amonut['60']['value']['sell_amount']
if (i == '300'):
trade_amonut['300'] = position(coin_number300,principal300,buy_amount,sell_amount,flee,judge_position,temp[i],current_price)
if (trade_amonut['300']['action'] == 'buy'):
principal300 = trade_amonut['300']['value']['principal']
coin_number300 = trade_amonut['300']['value']['coin_number']
all_buyamount += trade_amonut['300']['value']['buy_amount']
if (trade_amonut['300']['action'] == 'sell'):
principal300 = trade_amonut['300']['value']['principal']
coin_number300 = trade_amonut['300']['value']['coin_number']
all_sellamount += trade_amonut['300']['value']['sell_amount']
if (i == '900'):
trade_amonut['900'] = position(coin_number900,principal900,buy_amount,sell_amount,flee,judge_position,temp[i],current_price)
if (trade_amonut['900']['action'] == 'buy'):
principal900 = trade_amonut['900']['value']['principal']
coin_number900 = trade_amonut['900']['value']['coin_number']
all_buyamount += trade_amonut['900']['value']['buy_amount']
if (trade_amonut['900']['action'] == 'sell'):
principal900 = trade_amonut['900']['value']['principal']
coin_number900 = trade_amonut['900']['value']['coin_number']
all_sellamount += trade_amonut['900']['value']['sell_amount']
if (i == '1800'):
trade_amonut['1800'] = position(coin_number1800,principal1800,buy_amount,sell_amount,flee,judge_position,temp[i],current_price)
if (trade_amonut['1800']['action'] == 'buy'):
principal1800 = trade_amonut['1800']['value']['principal']
coin_number1800 = trade_amonut['1800']['value']['coin_number']
all_buyamount += trade_amonut['1800']['value']['buy_amount']
if (trade_amonut['1800']['action'] == 'sell'):
principal1800 = trade_amonut['1800']['value']['principal']
coin_number1800 = trade_amonut['1800']['value']['coin_number']
all_sellamount += trade_amonut['1800']['value']['sell_amount']
if(all_buyamount > all_sellamount):
uid = exec('buy', all_buyamount - all_sellamount)
sql = "INSERT INTO order_table(uid , valuess , timess) VALUES ('%s', '%s', '%s')" % (str(uid), json.dumps(
{'principal5': principal5, 'coin_number5': coin_number5, 'principal30': principal30,
'coin_number30': coin_number30, 'principal60': principal60, 'coin_number60': coin_number60,
'principal300': principal300, 'coin_number300': coin_number300, 'principal900': principal900,
'coin_number900': coin_number900, 'principal1800': principal1800, 'coin_number1800': coin_number1800,
'result': trade_amonut, 'current_price': current_price}), 0)
cursor.execute(sql)
db.commit()
if(all_sellamount > all_buyamount):
uid = exec('sell',all_sellamount-all_buyamount)
sql = "INSERT INTO order_table(uid , valuess , timess) VALUES ('%s', '%s', '%s')" % (str(uid), json.dumps(
{'principal5': principal5, 'coin_number5': coin_number5, 'principal30': principal30,
'coin_number30': coin_number30, 'principal60': principal60, 'coin_number60': coin_number60,
'principal300': principal300, 'coin_number300': coin_number300, 'principal900': principal900,
'coin_number900': coin_number900, 'principal1800': principal1800, 'coin_number1800': coin_number1800,
'result': trade_amonut, 'current_price': current_price}), 0)
cursor.execute(sql)
db.commit()
return {'principal5': principal5, 'coin_number5': coin_number5, 'principal30': principal30,
'coin_number30': coin_number30, 'principal60': principal60, 'coin_number60': coin_number60,
'principal300': principal300, 'coin_number300': coin_number300, 'principal900': principal900,
'coin_number900': coin_number900, 'principal1800': principal1800, 'coin_number1800': coin_number1800}
def position(coin_number,principal,buy_amount,sell_amount,flee,judge_position,index,current_price):
sposition = ( coin_number * current_price ) / (principal + ( coin_number * current_price ))
if ((index['buy_index'] > index['sell_index']) and (judge_position > sposition)):
buy_amount2 = (index['buy_index'] / (index['buy_index'] + index['sell_index'])) * buy_amount
if(buy_amount2 < principal):
coin_number = ((buy_amount2 - buy_amount2 * flee) / current_price) + coin_number
principal = principal - buy_amount2
else:
buy_amount2 = principal
coin_number = ((principal - principal * flee) / current_price) + coin_number
principal = 0
return {'action':'buy','value':{'buy_amount':buy_amount2,'principal':principal,'coin_number':coin_number}}
if (index['buy_index'] < index['sell_index'] and (sposition > 0)):
sell_amount2 = (index['sell_index'] / (index['buy_index'] + index['sell_index'])) * sell_amount
if((sell_amount2 / current_price) < coin_number):
coin_number = coin_number - (sell_amount2 / current_price)
principal = principal + (sell_amount2 - sell_amount2 * flee)
else:
sell_amount2 = coin_number * current_price
principal = principal + (coin_number - coin_number * flee) * current_price
coin_number = 0
return {'action':'sell','value': {'sell_amount': sell_amount2, 'principal': principal, 'coin_number': coin_number}}
return {'action': 'none'}
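# Worked example for the position ratio above (illustrative numbers only): with
# coin_number=2, current_price=100 and principal=300, sposition is
# (2 * 100) / (300 + 2 * 100) = 0.4, i.e. 40% of this strategy's equity already
# sits in the coin, so new buys are only sized while 0.4 stays below
# judge_position.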
def exec(action, buy_amount):
    # Placeholder order-execution stub: it returns a hard-coded order id instead
    # of calling a real exchange API, and note that the name shadows the
    # built-in `exec`.
return 23231321 |
cookies/run_test.py | nikicc/anaconda-recipes | 130 | 12760260 | # new cookies.py
from cookies import Cookies, Cookie
cookies = Cookies(rocky='road')
# Can also write explicitly: cookies['rocky'] = Cookie['road']
cookies['rocky'].path = "/cookie"
assert cookies.render_request() == 'rocky=road'
|
venv/Lib/site-packages/dask_ml/cluster/__init__.py | ZhangQingsen/CISC849Proj | 803 | 12760291 | <gh_stars>100-1000
"""Unsupervised Clustering Algorithms"""
from .k_means import KMeans # noqa
from .spectral import SpectralClustering # noqa
|
lib/metrics/F1_running_score.py | shampooma/openseg.pytorch | 1,069 | 12760335 | <gh_stars>1000+
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: JingyiXie, RainbowSecret
## Microsoft Research
## <EMAIL>
## Copyright (c) 2019
##
## Code adapted from:
## https://github.com/nv-tlabs/GSCNN/blob/master/utils/f_boundary.py
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
import numpy as np
import torch
from multiprocessing.pool import Pool
class F1RunningScore(object):
def __init__(self, configer=None, num_classes=None, boundary_threshold=0.00088, num_proc=15):
assert configer is not None or num_classes is not None
self.configer = configer
if configer is not None:
self.n_classes = self.configer.get('data', 'num_classes')
else:
self.n_classes = num_classes
self.ignore_index = -1
self.boundary_threshold = boundary_threshold
self.pool = Pool(processes=num_proc)
self.num_proc = num_proc
self._Fpc = 0
self._Fc = 0
self.seg_map_cache = []
self.gt_map_cache = []
def _update_cache(self, seg_map, gt_map):
"""
Append inputs to `seg_map_cache` and `gt_map_cache`.
Returns whether the length reached our pool size.
"""
self.seg_map_cache.extend(seg_map)
self.gt_map_cache.extend(gt_map)
return len(self.gt_map_cache) >= self.num_proc
def _get_from_cache(self):
n = self.num_proc
seg_map, self.seg_map_cache = self.seg_map_cache[:n], self.seg_map_cache[n:]
gt_map, self.gt_map_cache = self.gt_map_cache[:n], self.gt_map_cache[n:]
return seg_map, gt_map
def update(self, seg_map, gt_map):
if self._update_cache(seg_map, gt_map):
seg_map, gt_map = self._get_from_cache()
self._update_scores(seg_map, gt_map)
else:
return
def _update_scores(self, seg_map, gt_map):
batch_size = len(seg_map)
if batch_size == 0:
return
Fpc = np.zeros(self.n_classes)
Fc = np.zeros(self.n_classes)
for class_id in range(self.n_classes):
args = []
for i in range(batch_size):
if seg_map[i].shape[0] == self.n_classes:
pred_i = seg_map[i][class_id] > 0.5
pred_is_boundary = True
else:
pred_i = seg_map[i] == class_id
pred_is_boundary = False
args.append([
(pred_i).astype(np.uint8),
(gt_map[i] == class_id).astype(np.uint8),
(gt_map[i] == -1),
self.boundary_threshold,
class_id,
pred_is_boundary
])
results = self.pool.map(db_eval_boundary, args)
results = np.array(results)
Fs = results[:, 0]
_valid = ~np.isnan(Fs)
Fc[class_id] = np.sum(_valid)
Fs[np.isnan(Fs)] = 0
Fpc[class_id] = sum(Fs)
self._Fc = self._Fc + Fc
self._Fpc = self._Fpc + Fpc
def get_scores(self):
if self.seg_map_cache is None:
return 0, 0
self._update_scores(self.seg_map_cache, self.gt_map_cache)
F_score = np.sum(self._Fpc / self._Fc) / self.n_classes
F_score_classwise = self._Fpc / self._Fc
return F_score, F_score_classwise
def reset(self):
self._Fpc = self._Fc = 0
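# Minimal usage sketch (class count, process count and the label maps are
# assumptions, not taken from the original repo): feed batches of HxW integer
# label maps, then read the aggregated boundary F-score.
#
#   score = F1RunningScore(num_classes=19, num_proc=4)
#   score.update([pred_label_map], [gt_label_map])
#   mean_f1, per_class_f1 = score.get_scores()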
def db_eval_boundary(args):
"""
Compute mean,recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
Returns:
F (float): boundaries F-measure
P (float): boundaries precision
R (float): boundaries recall
"""
foreground_mask, gt_mask, ignore_mask, bound_th, class_id, pred_is_boundary = args
assert np.atleast_3d(foreground_mask).shape[2] == 1
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
# print(bound_pix)
# print(gt.shape)
# print(np.unique(gt))
foreground_mask[ignore_mask] = 0
gt_mask[ignore_mask] = 0
# Get the pixel boundaries of both masks
if pred_is_boundary:
fg_boundary = foreground_mask
else:
fg_boundary = seg2bmap(foreground_mask)
gt_boundary = seg2bmap(gt_mask)
from skimage.morphology import disk
from cv2 import dilate
def binary_dilation(x, d): return dilate(
        x.astype(np.uint8), d).astype(bool)
fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F, precision
def seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
<NAME> <<EMAIL>>
January 2003
"""
    seg = seg.astype(bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \
        "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + floor((y - 1) + height / h)
i = 1 + floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap |
krop/build_rop.py | tszentendrei/henkaku | 534 | 12760371 | #!/usr/bin/env python3
from sys import argv, exit
import tempfile
import os.path
import subprocess
tpl = """
.equ ENC_PAYLOAD_ADDR, {payload_addr}
.equ ENC_PAYLOAD_SIZE, {payload_size}
.equ BASE, {sysmem_base}
.equ SECOND_PAYLOAD, {second_payload}
"""
prefix = "arm-vita-eabi-"
def build(tmp, code):
src_file = os.path.join(tmp, "rop.S")
obj_file = os.path.join(tmp, "rop.o")
bin_file = os.path.join(tmp, "rop.bin")
fout = open(src_file, "wb")
fout.write(code)
fout.close()
subprocess.check_call([prefix + "as", src_file, "-o", obj_file])
subprocess.check_call([prefix + "objcopy", "-O", "binary", obj_file, bin_file])
return bin_file
def analyze(tmp, bin_file):
db_file = os.path.join(tmp, "rop.db")
subprocess.check_call(["python3", "krop/ropfuscator.py", "analyze", bin_file, db_file, "DxT9HVn5"])
return db_file
def obfuscate(tmp, bin_file, db_file):
obf_file = os.path.join(tmp, "rop.obf")
subprocess.check_call(["python3", "krop/ropfuscator.py", "generate", bin_file, obf_file, db_file])
fin = open(obf_file, "rb")
data = fin.read()
fin.close()
return data
def chunk(b, size):
if len(b) % size != 0:
raise RuntimeError("chunk: b % size != 0")
return [b[x * size:(x + 1) * size] for x in range(0, len(b) // size)]
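# Example (illustrative): chunk(b"\x00\x01\x02\x03\x04\x05\x06\x07", 4) returns
# [b"\x00\x01\x02\x03", b"\x04\x05\x06\x07"]; a length that is not a multiple of
# `size` raises RuntimeError.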
def write_rop_code(krop, relocs, addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos):
output = ""
output += "from rop import Ret, Load\n"
output += "def krop(rop):\n"
output += " c = rop.caller\n"
output += " d = rop.data\n"
tpl = " c.store({}, d.krop + 0x{:x})\n"
for x, (addr, reloc) in enumerate(zip(krop, relocs)):
addr = int.from_bytes(addr, "little")
if reloc == 0:
s = "0x{:x}".format(addr)
else:
output += " c.add(Load(d.sysmem_base), 0x{:x})\n".format(addr)
s = "Ret"
output += tpl.format(s, x * 4)
output += " c.store(Load(d.kx_loader_addr), d.krop + 0x{:x})\n".format(addr_pos * 4)
# I've hardcoded payload size to be 0x200, deal with it
payload_size = 0x200
output += " c.store(0x{:x}, d.krop + 0x{:x})\n".format((payload_size >> 2) + 0x10, size_shift_pos * 4)
output += " c.store(0x{:x}, d.krop + 0x{:x})\n".format(payload_size ^ 0x40, size_xor_pos * 4)
output += " c.store(0x{:x}, d.krop + 0x{:x})\n".format(payload_size, size_plain_pos * 4)
output += " c.store(d.second_payload, d.krop + 0x{:x})\n".format(second_payload_pos * 4)
return output
def main():
if len(argv) != 3:
print("Usage: build_rop.py rop.S output-directory/")
return -1
fin = open(argv[1], "rb")
code = fin.read()
fin.close()
tags = {
"payload_addr": 0xF0F0F0F0,
"payload_size": 0x0A0A0A00,
"sysmem_base": 0xB0B00000,
"second_payload": 0xC0C0C0C0,
}
with tempfile.TemporaryDirectory() as tmp:
first_bin = build(tmp, tpl.format(payload_addr=0, payload_size=0, sysmem_base=0, second_payload=0).encode("ascii") + code)
db_file = analyze(tmp, first_bin)
first = obfuscate(tmp, first_bin, db_file)
second_bin = build(tmp, tpl.format(**tags).encode("ascii") + code)
second = obfuscate(tmp, second_bin, db_file)
if len(first) != len(second):
print("wtf? got different krop lengths")
return -2
# Find differences in krops, a difference indicates either that this address depends on sysmem base or it's
# payload addr/size
krop = first = chunk(first, 4)
second = chunk(second, 4)
relocs = [0] * len(first)
addr_pos = size_shift_pos = size_xor_pos = size_plain_pos = second_payload_pos = -1
for i, (first_word, second_word) in enumerate(zip(first, second)):
if first_word != second_word:
second = int.from_bytes(second_word, "little")
if second == tags["payload_addr"]:
addr_pos = i
elif second == (tags["payload_size"] >> 2) + 0x10:
size_shift_pos = i
elif second == tags["payload_size"] ^ 0x40:
size_xor_pos = i
elif second == tags["payload_size"]:
size_plain_pos = i
elif second == tags["second_payload"]:
second_payload_pos = i
else:
relocs[i] = 1
if -1 in [addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos]:
print("unable to resolve positions: addr={}, size_shift={}, size_xor={}, size_plain={}, second_payload={}".format(
addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos))
return -2
print("Kernel rop size: 0x{:x} bytes".format(len(krop) * 4))
with open(os.path.join(argv[2], "krop.py"), "w") as fout:
fout.write(write_rop_code(krop, relocs, addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos))
if __name__ == "__main__":
exit(main())
|
FeatureProject/cut_td_idf.py | liruifeng-01/nlp_xiaojiang | 1,379 | 12760393 | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/4/1 10:35
# @author :Mo
# @function :cut sentences
from conf.path_config import chicken_and_gossip_path, td_idf_cut_path, td_idf_cut_pinyin
from utils.text_tools import txtWrite, txtRead, get_syboml, strQ2B
from conf.path_config import projectdir
from gensim import corpora, models
import xpinyin
import pickle
import jieba
def cut_td_idf(sources_path, target_path):
"""
        Segment the Chinese corpus with jieba and write the result
    :param sources_path: input corpus file
    :param target_path: output file with space-separated tokens
    :return:
"""
print("cut_td_idf start! ")
corpus = txtRead(sources_path)
governments = []
for corpus_one in corpus:
corpus_one_clear = corpus_one.replace(' ', '').strip()
ques_q2b = strQ2B(corpus_one_clear.strip())
ques_q2b_syboml = get_syboml(ques_q2b)
governments.append(ques_q2b_syboml.strip())
government_ques = list(map(lambda x: ' '.join(jieba.lcut(x)), governments))
topic_ques_all = []
for topic_ques_one in government_ques:
top_ques_aqlq = topic_ques_one.replace(' ', ' ').replace(' ', ' ').strip() + '\n'
topic_ques_all.append(top_ques_aqlq)
txtWrite(topic_ques_all, target_path)
print("cut_td_idf ok! " + sources_path)
def cut_td_idf_pinyin(sources_path, target_path):  # convert Chinese to pinyin
    """
        Convert the Chinese corpus to pinyin
    :param sources_path: input corpus file
    :param target_path: output file with space-separated pinyin syllables
    :return:
"""
pin = xpinyin.Pinyin()
corpus = txtRead(sources_path)
topic_ques_all = []
corpus_count = 0
for corpus_one in corpus:
corpus_count += 1
# time1 = time.time()
corpus_one_clear = corpus_one.replace(' ', '').strip()
ques_q2b = strQ2B(corpus_one_clear.strip())
ques_q2b_syboml = get_syboml(ques_q2b)
ques_q2b_syboml_pinying = pin.get_pinyin(ques_q2b_syboml.replace(' ', '').replace(' ', '').strip(), ' ')
topic_ques_all.append(ques_q2b_syboml_pinying + '\n')
# time2 = time.time()
# print(str(corpus_count) + 'time:' + str(time2 - time1))
txtWrite(topic_ques_all, target_path)
print("cut_td_idf_pinyin ok! " + sources_path)
def init_tfidf_chinese_or_pinyin(sources_path):
"""
        Build and pickle the tf-idf dictionary and model for the given corpus
    :param sources_path: segmented (Chinese or pinyin) corpus file
:return:
"""
questions = txtRead(sources_path)
corpora_documents = []
for item_text in questions:
item_seg = list(jieba.cut(str(item_text).strip()))
corpora_documents.append(item_seg)
dictionary = corpora.Dictionary(corpora_documents)
corpus = [dictionary.doc2bow(text) for text in corpora_documents]
tfidf_model = models.TfidfModel(corpus)
print("init_tfidf_chinese_or_pinyin ok! " + sources_path)
file = open(sources_path.replace(".csv", "_dictionary_model.pkl"), 'wb')
pickle.dump([dictionary, tfidf_model], file)
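# Hedged usage sketch (mirrors the pickle layout dumped above; the query string
# is a made-up example): reload the dictionary and tf-idf model and score a new
# question.
#
#   with open(td_idf_cut_path.replace(".csv", "_dictionary_model.pkl"), "rb") as f:
#       dictionary, tfidf_model = pickle.load(f)
#   bow = dictionary.doc2bow(jieba.lcut("怎么买火车票"))
#   print(tfidf_model[bow])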
if __name__ == '__main__':
# path_text = projectdir + '/Data/chicken_gossip.txt'
# sentences = txtRead(path_text)
# sentences_q = []
# for sentences_one in sentences:
# sentences_one_replace = sentences_one.replace(" ", "").replace("\t", "")
# sentences_one_replace_split = sentences_one_replace.split("|")
# sentence_new = sentences_one_replace_split[0] + "\t" + "".join(sentences_one_replace_split[1:])
# sentences_q.append(sentence_new)
# sentences = txtWrite(sentences_q, projectdir + '/Data/chicken_and_gossip.txt')
cut_td_idf(chicken_and_gossip_path, td_idf_cut_path)
cut_td_idf_pinyin(chicken_and_gossip_path, td_idf_cut_pinyin)
init_tfidf_chinese_or_pinyin(td_idf_cut_path)
init_tfidf_chinese_or_pinyin(td_idf_cut_pinyin)
print("corpus ok!")
|
mocodo/dynamic.py | JeanHenri79/mocodo | 158 | 12760394 | #!/usr/bin/env python
# encoding: utf-8
class Dynamic(str):
"""Wrapper for the strings that need to be dynamically interpreted by the generated Python files."""
pass |
tests/test_repr.py | dolfinus/pexpect | 2,132 | 12760402 | """ Test __str__ methods. """
import pexpect
from . import PexpectTestCase
class TestCaseMisc(PexpectTestCase.PexpectTestCase):
def test_str_spawnu(self):
""" Exercise spawnu.__str__() """
# given,
p = pexpect.spawnu('cat')
# exercise,
value = str(p)
# verify
assert isinstance(value, str)
def test_str_spawn(self):
""" Exercise spawn.__str__() """
# given,
p = pexpect.spawn('cat')
# exercise,
value = str(p)
# verify
assert isinstance(value, str)
def test_str_before_spawn(self):
""" Exercise derived spawn.__str__() """
# given,
child = pexpect.spawn(None, None)
child.read_nonblocking = lambda size, timeout: b''
try:
child.expect('alpha', timeout=0.1)
except pexpect.TIMEOUT as e:
str(e) # Smoketest
else:
assert False, 'TIMEOUT exception expected. No exception raised.'
|
controlcenter/__init__.py | EnriqueSoria/django-controlcenter | 980 | 12760428 | from .dashboards import Dashboard # NOQA
|
workshop_material/029_find_paper2.py | nrupatunga/pyimageconf2018 | 106 | 12760496 | <gh_stars>100-1000
from dlib import *
import numpy as np
import sys
sys.path = ['./superfast/build'] + sys.path
import superfast
# NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def discard_all_but_largest_blob(img):
labels, num_blobs = label_connected_blobs(img, connected_if_both_not_zero=True)
h = get_histogram(labels, num_blobs)
# ignore background blobs
h[0] = 0
largest_blob = np.argmax(h)
superfast.zero_pixels_not_labeled_with_val(img, labels, largest_blob)
return img
#img = load_grayscale_image(sys.argv[1])
# discarding all but largest blob fixes this image
img = load_grayscale_image('./images/find_page/paper22.jpg')
# What about this image? Need to do something to fix it
#img = load_grayscale_image('./images/find_page/tissue_04.jpg')
ht = hough_transform(300)
img = resize_image(img, ht.size, ht.size)
win1 = image_window(img)
ig = image_gradients(10)
x = ig.gradient_x(img)
y = ig.gradient_y(img)
edges = suppress_non_maximum_edges(x,y)
win3 = image_window(edges)
edges = discard_all_but_largest_blob(hysteresis_threshold(edges)) # NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
win4 = image_window(edges)
himg = ht(edges)
hits = ht.find_strong_hough_points(himg, hough_count_thresh=ht.size/5, angle_nms_thresh=15, radius_nms_thresh=10)
lines = [ht.get_line(p) for p in hits[0:4]]
win1.add_overlay(lines)
page = extract_image_4points(img, lines, 200,200)
win_page = image_window(page)
input("hit enter to exit")
|
runway/cfngin/actions/init.py | onicagroup/runway | 134 | 12760497 | """CFNgin init action."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Optional, Union, cast
from ...compat import cached_property
from ...config.models.cfngin import CfnginStackDefinitionModel
from ...core.providers.aws.s3 import Bucket
from ..exceptions import CfnginBucketAccessDenied
from . import deploy
from .base import BaseAction
if TYPE_CHECKING:
import threading
from ..._logging import RunwayLogger
from ...context import CfnginContext
from ..providers.aws.default import ProviderBuilder
LOGGER = cast("RunwayLogger", logging.getLogger(__name__))
class Action(BaseAction):
"""Initialize environment."""
NAME = "init"
DESCRIPTION = "Initialize environment"
def __init__(
self,
context: CfnginContext,
provider_builder: Optional[ProviderBuilder] = None,
cancel: Optional[threading.Event] = None,
):
"""Instantiate class.
This class creates a copy of the context object prior to initialization
as some of it can perform destructive actions on the context object.
Args:
context: The context for the current run.
provider_builder: An object that will build a provider that will be
interacted with in order to perform the necessary actions.
cancel: Cancel handler.
"""
super().__init__(
context=context.copy(), provider_builder=provider_builder, cancel=cancel
)
@property
def _stack_action(self) -> Any:
"""Run against a step."""
return None
@cached_property
def cfngin_bucket(self) -> Optional[Bucket]:
"""CFNgin bucket.
Raises:
CfnginBucketRequired: cfngin_bucket not defined.
"""
if not self.context.bucket_name:
return None
return Bucket(
self.context,
name=self.context.bucket_name,
region=self.context.bucket_region,
)
@cached_property
def default_cfngin_bucket_stack(self) -> CfnginStackDefinitionModel:
"""CFNgin bucket stack."""
return CfnginStackDefinitionModel(
class_path="runway.cfngin.blueprints.cfngin_bucket.CfnginBucket",
in_progress_behavior="wait",
name="cfngin-bucket",
termination_protection=True,
variables={"BucketName": self.context.bucket_name},
)
def run(
self,
*,
concurrency: int = 0,
dump: Union[bool, str] = False, # pylint: disable=unused-argument
force: bool = False, # pylint: disable=unused-argument
outline: bool = False, # pylint: disable=unused-argument
tail: bool = False,
upload_disabled: bool = True, # pylint: disable=unused-argument
**_kwargs: Any,
) -> None:
"""Run the action.
Args:
concurrency: The maximum number of concurrent deployments.
dump: Not used by this action
force: Not used by this action.
outline: Not used by this action.
tail: Tail the stack's events.
upload_disabled: Not used by this action.
Raises:
CfnginBucketAccessDenied: Could not head cfngin_bucket.
"""
if not self.cfngin_bucket:
LOGGER.info("skipped; cfngin_bucket not defined")
return
if self.cfngin_bucket.forbidden:
raise CfnginBucketAccessDenied(bucket_name=self.cfngin_bucket.name)
if self.cfngin_bucket.exists:
LOGGER.info("cfngin_bucket %s already exists", self.cfngin_bucket.name)
return
if self.context.get_stack("cfngin-bucket"):
LOGGER.verbose(
"found stack for creating cfngin_bucket: cfngin-bucket",
)
self.context.stack_names = ["cfngin-bucket"]
else:
LOGGER.notice("using default blueprint to create cfngin_bucket...")
self.context.config.stacks = [self.default_cfngin_bucket_stack]
# clear cached values that were populated by checking the previous condition
self.context._del_cached_property( # pylint: disable=protected-access
"stacks", "stacks_dict"
)
if self.provider_builder:
self.provider_builder.region = self.context.bucket_region
deploy.Action(
context=self.context,
provider_builder=self.provider_builder,
cancel=self.cancel,
).run(
concurrency=concurrency,
tail=tail,
upload_disabled=True,
)
return
def pre_run(
self,
*,
dump: Union[bool, str] = False, # pylint: disable=unused-argument
outline: bool = False, # pylint: disable=unused-argument
**__kwargs: Any,
) -> None:
"""Do nothing."""
def post_run(
self,
*,
dump: Union[bool, str] = False, # pylint: disable=unused-argument
outline: bool = False, # pylint: disable=unused-argument
**__kwargs: Any,
) -> None:
"""Do nothing."""
|
octavia/image/image_base.py | zhangi/octavia | 129 | 12760534 | <reponame>zhangi/octavia
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
class ImageBase(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_image_id_by_tag(self, image_tag, image_owner=None):
"""Get image ID by image tag and owner.
:param image_tag: image tag
:param image_owner: optional image owner
:raises: ImageGetException if no images found with given tag
:return: image id
"""
|
hwt/serializer/verilog/context.py | ufo2011/hwt | 134 | 12760550 | <reponame>ufo2011/hwt
from hdlConvertorAst.to.verilog.constants import SIGNAL_TYPE
class SignalTypeSwap():
"""
An object which is used as a context manager for signalType
inside of :class:`hwt.serializer.verilog.serializer.ToHdlAstVerilog`
"""
def __init__(self, ctx, signalType: SIGNAL_TYPE):
self.ctx = ctx
self.signalType = signalType
def __enter__(self):
self.orig = self.ctx.signalType
self.ctx.signalType = self.signalType
def __exit__(self, exc_type, exc_val, exc_tb):
self.ctx.signalType = self.orig
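# Hedged usage sketch (the serializer object and the SIGNAL_TYPE member name are
# assumptions, not taken from this repo): temporarily switch the signal type
# while serializing a sub-expression; the previous value is restored on exit.
#
#   with SignalTypeSwap(to_hdl_ctx, SIGNAL_TYPE.REG):
#       ...  # code here sees to_hdl_ctx.signalType == SIGNAL_TYPE.REG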
|
haiku/_src/batch_norm_test.py | pierricklee/dm-haiku | 1,647 | 12760569 | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.batch_norm."""
import os
from absl.testing import absltest
from haiku._src import batch_norm
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class BatchNormTest(absltest.TestCase):
@test_utils.transform_and_run
def test_basic(self):
data = jnp.arange(2 * 3 * 4, dtype=jnp.float32).reshape([2, 3, 4])
norm = batch_norm.BatchNorm(True, True, 0.9)
result = norm(data, is_training=True)
result_0_replicated = jnp.broadcast_to(result[:, :, :1], result.shape)
# Input data is symmetrical variance per-channel.
np.testing.assert_allclose(result, result_0_replicated)
# Running through again in test mode produces same output.
np.testing.assert_allclose(norm(data, is_training=False), result, rtol=2e-2)
@test_utils.transform_and_run
def test_simple_training(self):
layer = batch_norm.BatchNorm(
create_scale=False, create_offset=False, decay_rate=0.9)
inputs = np.ones([2, 3, 3, 5])
scale = np.full((5,), 0.5)
offset = np.full((5,), 2.0)
result = layer(inputs, True, scale=scale, offset=offset)
np.testing.assert_equal(result, np.full(inputs.shape, 2.0))
@test_utils.transform_and_run
def test_simple_training_nchw(self):
layer = batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
data_format="NCHW")
inputs = np.ones([2, 5, 3, 3])
scale = np.full((5, 1, 1), 0.5)
offset = np.full((5, 1, 1), 2.0)
result = layer(inputs, True, scale=scale, offset=offset)
np.testing.assert_equal(result, np.full(inputs.shape, 2.0))
@test_utils.transform_and_run
def test_simple_training_normalized_axes(self):
layer = batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
axis=[0, 2, 3]) # Not the second axis.
# This differs only in the second axis.
inputs = np.stack([2.0 * np.ones([5, 3, 3]), np.ones([5, 3, 3])], 1)
result = layer(inputs, True)
# Despite not all values being identical, treating slices from the first
# axis separately leads to a fully normalized = equal array.
np.testing.assert_equal(result, np.zeros(inputs.shape))
def test_simple_training_cross_replica_axis(self):
ldc = jax.local_device_count()
def f(x, is_training=True):
return batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
cross_replica_axis="i",
)(x, is_training=is_training)
f = transform.transform_with_state(f)
inputs = np.arange(ldc * 4).reshape(ldc, 4)
key = np.broadcast_to(jax.random.PRNGKey(42), (ldc, 2))
params, state = jax.pmap(f.init, axis_name="i")(key, inputs)
result, _ = jax.pmap(f.apply, axis_name="i")(params, state, key, inputs)
mean = np.mean(inputs, axis=0)
std = np.std(inputs, axis=0) + 1e-10
expected = (inputs - mean) / std
np.testing.assert_array_almost_equal(result, expected)
def test_simple_training_cross_replica_axis_index_groups(self):
ldc = jax.local_device_count()
if ldc < 2:
self.skipTest("Cross-replica test requires at least 2 devices.")
num_groups = ldc // 2
num_group_devices = ldc // num_groups
# for 8 devices this produces [[0, 1], [2, 3], [4, 5], [6, 7]] groups.
groups = np.arange(ldc).reshape(num_groups, num_group_devices).tolist()
def f(x, is_training=True):
return batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
cross_replica_axis="i",
cross_replica_axis_index_groups=groups,
)(x, is_training=is_training)
f = transform.transform_with_state(f)
inputs = np.arange(ldc * 4).reshape(ldc, 4).astype(np.float32)
key = np.broadcast_to(jax.random.PRNGKey(42), (ldc, 2))
params, state = jax.pmap(f.init, axis_name="i")(key, inputs)
result, _ = jax.pmap(f.apply, axis_name="i")(params, state, key, inputs)
expected = np.empty_like(inputs)
for g in range(num_groups):
group_inputs = inputs[num_group_devices*g:num_group_devices*(g + 1)]
group_mean = np.mean(group_inputs, axis=0)
group_std = np.std(group_inputs, axis=0) + 1e-10
group_inputs = (group_inputs - group_mean) / group_std
expected[num_group_devices*g:num_group_devices*(g + 1)] = group_inputs
np.testing.assert_array_almost_equal(result, expected)
@test_utils.transform_and_run
def test_no_scale_and_offset(self):
layer = batch_norm.BatchNorm(
create_scale=False, create_offset=False, decay_rate=0.9)
inputs = jnp.ones([2, 5, 3, 3, 3])
result = layer(inputs, True)
np.testing.assert_equal(result, np.zeros_like(inputs))
@test_utils.transform_and_run
def test_no_scale_and_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `scale_init` if `create_scale=False`"):
batch_norm.BatchNorm(
create_scale=False,
create_offset=True,
decay_rate=0.9,
scale_init=jnp.ones)
@test_utils.transform_and_run
def test_no_offset_beta_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `offset_init` if `create_offset=False`"):
batch_norm.BatchNorm(
create_scale=True,
create_offset=False,
decay_rate=0.9,
offset_init=jnp.zeros)
def test_eps_cast_to_var_dtype(self):
# See https://github.com/google/jax/issues/4718 for more info. In the
# context of this test we need to assert NumPy bf16 params/state and a
# Python float for eps preserve bf16 output.
def f(x, is_training):
return batch_norm.BatchNorm(True, True, 0.9, eps=0.1)(x, is_training)
f = transform.transform_with_state(f)
x = np.ones([], jnp.bfloat16)
key = jax.random.PRNGKey(42)
params, state = jax.device_get(f.init(key, x, True))
y, _ = f.apply(params, state, None, x, False)
self.assertEqual(y.dtype, jnp.bfloat16)
if __name__ == "__main__":
_xla_flags = os.environ.get("XLA_FLAGS", "")
os.environ["XLA_FLAGS"] = (_xla_flags +
" --xla_force_host_platform_device_count=8")
absltest.main()
os.environ["XLA_FLAGS"] = _xla_flags
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/NorthInDTO.py | yuanyi-thu/AIOT- | 128 | 12760588 | class NorthInDTO(object):
def __init__(self):
self.platformIp = None
self.platformPort = None
def getPlatformIp(self):
return self.platformIp
def setPlatformIp(self, platformIp):
self.platformIp = platformIp
def getPlatformPort(self):
return self.platformPort
def setPlatformPort(self, platformPort):
self.platformPort = platformPort
|
app/api/__init__.py | TestAuto2018/TestAuto | 249 | 12760615 | <filename>app/api/__init__.py
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
    WeChat official account: 开源优测
Email: <EMAIL>
"""
from flask import Blueprint
from flask_restful import Api
api_bp = Blueprint('api', __name__)
api = Api(api_bp)
from .auth import Auth
api.add_resource(Auth, "/auth/")
from .product import Product
api.add_resource(Product, "/product/")
from .project import Project
api.add_resource(Project, "/project/")
from .suite import Suite
api.add_resource(Suite, "/suite/")
from .object import Object
api.add_resource(Object, "/object/")
from .case import Case
api.add_resource(Case, "/case/")
from .step import Step
api.add_resource(Step, "/step/")
from .var import Var
api.add_resource(Var, "/var/")
from .keyword import Keyword
api.add_resource(Keyword, "/keyword/")
from .help import Help
api.add_resource(Help, "/help/")
from .task import Task
api.add_resource(Task, "/task/")
from .trigger import Triggers
api.add_resource(Triggers, "/trigger/")
from .stats import Stats
api.add_resource(Stats, "/stats/")
from .user import Users
api.add_resource(Users, "/user/")
from .role import Roles
api.add_resource(Roles, "/role/")
from .user_keyword import UserKeywordSuite, UserKeyword
api.add_resource(UserKeywordSuite, "/user_keyword_suite/")
api.add_resource(UserKeyword, "/user_keyword/")
|
mrgeo-services/mrgeo-services-core/src/main/scripts/wps-shell-example.py | ngageoint/mrgeo | 198 | 12760623 | #!/usr/bin/python
import sys
import time
# read all the input values into a dictionary for easy access.
args = {}
for v in sys.argv[1:]:
s = v.split("=")
args[s[0]] = s[1]
# put an artificial pause to simulate an expensive operation
for i in range(1, 10):
print "progress:" + str(i * 10)
time.sleep(1)
# write out the results.
print "summary:" + str(args)
print "output:" + args['output']
|
ch12-ann/classification_example.py | GaoX2015/intro_ds | 314 | 12760633 | # -*- coding: UTF-8 -*-
"""
This script demonstrates how to solve a classification problem with a neural network
"""
import os
from mlp import ANN
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_circles, make_moons
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, OneHotEncoder
def generateData(n):
"""
"""
np.random.seed(12046)
blobs = make_blobs(n_samples=n, centers = [[-2, -2], [2, 2]])
circles = make_circles(n_samples=n, factor=.4, noise=.05)
moons = make_moons(n_samples=n, noise=.05)
blocks = np.random.rand(n, 2) - 0.5
y = (blocks[:, 0] * blocks[:, 1] < 0) + 0
blocks = (blocks, y)
    # Neural networks are sensitive to the linear scale of the inputs, so standardize the data
scaler = StandardScaler()
blobs = (scaler.fit_transform(blobs[0]), blobs[1])
circles = (scaler.fit_transform(circles[0]), circles[1])
moons = (scaler.fit_transform(moons[0]), moons[1])
blocks = (scaler.fit_transform(blocks[0]), blocks[1])
return blobs, circles, moons, blocks
def drawData(ax, data):
"""
    Visualize the data points, one marker style per class
"""
X, y = data
label1 = X[y>0]
ax.scatter(label1[:, 0], label1[:, 1], marker="o")
label0 = X[y==0]
ax.scatter(label0[:, 0], label0[:, 1], marker="^", color="k")
return ax
def drawModel(ax, model):
"""
    Visualize the model's decision boundary
"""
x1 = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 100)
x2 = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 100)
X1, X2 = np.meshgrid(x1, x2)
Y = model.predict_proba(np.c_[X1.ravel(), X2.ravel()])[:, 1]
Y = Y.reshape(X1.shape)
ax.contourf(X1, X2, Y, levels=[0, 0.5], colors=["gray"], alpha=0.4)
return ax
def trainLogit(data):
"""
"""
X, y = data
model = LogisticRegression()
model.fit(X, y)
return model
def trainANN(data, logPath):
"""
"""
X, y = data
enc = OneHotEncoder()
y = enc.fit_transform(y.reshape(-1, 1)).toarray()
model = ANN([4, 4, 2], logPath)
model.fit(X, y)
return model
def visualize(data):
"""
"""
    # Create two figure canvases, one for the ANN and one for logistic regression
fig = plt.figure(figsize=(10, 10), dpi=80)
fig1 = plt.figure(figsize=(10, 10), dpi=80)
    # Draw four subplots in each figure, one per dataset
for i in range(len(data)):
ax = fig.add_subplot(2, 2, i+1)
ax1 = fig1.add_subplot(2, 2, i+1)
drawData(ax, data[i])
        # The log path separator on Windows differs from Linux
if os.name == "nt":
drawModel(ax, trainANN(data[i], "logs\\data_%s" % (i+1)))
else:
drawModel(ax, trainANN(data[i], "logs/data_%s" % (i+1)))
drawData(ax1, data[i])
drawModel(ax1, trainLogit(data[i]))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
plt.show()
if __name__ == "__main__":
data = generateData(200)
visualize(data)
|
lib/pystatsml/plot_utils.py | gautard/pystatsml | 123 | 12760674 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 29 10:58:31 2016
@author: <EMAIL>
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Ellipse
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
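# Hedged usage sketch (random data, purely illustrative): draw a 2-sigma ellipse
# over a scatter of correlated Gaussian samples.
if __name__ == "__main__":
    pts = np.random.multivariate_normal([0.0, 0.0], [[1.0, 0.5], [0.5, 2.0]], size=500)
    plt.scatter(pts[:, 0], pts[:, 1], s=5)
    plot_cov_ellipse(np.cov(pts, rowvar=False), pts.mean(axis=0), nstd=2, alpha=0.3)
    plt.show()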
|
ubpf/asm_parser.py | dongxingshui/ubpf | 466 | 12760688 | #!/usr/bin/env python
from __future__ import print_function
from parcon import *
from collections import namedtuple
hexchars = '0123456789abcdefABCDEF'
Reg = namedtuple("Reg", ["num"])
Imm = namedtuple("Imm", ["value"])
MemRef = namedtuple("MemRef", ["reg", "offset"])
def keywords(vs):
return First(*[Keyword(SignificantLiteral(v)) for v in vs])
hexnum = SignificantLiteral('0x') + +CharIn(hexchars)
decnum = +Digit()
offset = (CharIn("+-") + Exact(hexnum | decnum))[flatten]["".join][lambda x: int(x, 0)]
imm = (-CharIn("+-") + Exact(hexnum | decnum))[flatten]["".join][lambda x: int(x, 0)][Imm]
reg = Literal('r') + integer[int][Reg]
memref = (Literal('[') + reg + Optional(offset, 0) + Literal(']'))[lambda x: MemRef(*x)]
unary_alu_ops = ['neg', 'neg32', 'le16', 'le32', 'le64', 'be16', 'be32', 'be64']
binary_alu_ops = ['add', 'sub', 'mul', 'div', 'or', 'and', 'lsh', 'rsh',
'mod', 'xor', 'mov', 'arsh']
binary_alu_ops.extend([x + '32' for x in binary_alu_ops])
alu_instruction = \
(keywords(unary_alu_ops) + reg) | \
(keywords(binary_alu_ops) + reg + "," + (reg | imm))
mem_sizes = ['w', 'h', 'b', 'dw']
mem_store_reg_ops = ['stx' + s for s in mem_sizes]
mem_store_imm_ops = ['st' + s for s in mem_sizes]
mem_load_ops = ['ldx' + s for s in mem_sizes]
mem_instruction = \
(keywords(mem_store_reg_ops) + memref + "," + reg) | \
(keywords(mem_store_imm_ops) + memref + "," + imm) | \
(keywords(mem_load_ops) + reg + "," + memref) | \
(keywords(["lddw"]) + reg + "," + imm)
jmp_cmp_ops = ['jeq', 'jgt', 'jge', 'jlt', 'jle', 'jset', 'jne', 'jsgt', 'jsge', 'jslt', 'jsle']
jmp_instruction = \
(keywords(jmp_cmp_ops) + reg + "," + (reg | imm) + "," + offset) | \
(keywords(['ja']) + offset) | \
(keywords(['call']) + imm) | \
(keywords(['exit'])[lambda x: (x, )])
instruction = alu_instruction | mem_instruction | jmp_instruction
start = ZeroOrMore(instruction + Optional(Literal(';'))) + End()
def parse(source):
return start.parse_string(source)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Assembly parser", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('file', type=argparse.FileType('r'), default='-')
args = parser.parse_args()
result = parse(args.file.read())
for inst in result:
print(repr(inst))
|
acme/agents/jax/bc/agent_test.py | ostap-viniavskyi/acme | 2,650 | 12760689 | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BC agent."""
from absl.testing import absltest
from absl.testing import parameterized
from acme import specs
from acme import types
from acme.agents.jax import bc
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.testing import fakes
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax.scipy import special
import numpy as np
import optax
def make_networks(
spec: specs.EnvironmentSpec,
discrete_actions: bool = False) -> networks_lib.FeedForwardNetwork:
"""Creates networks used by the agent."""
if discrete_actions:
final_layer_size = spec.actions.num_values
else:
final_layer_size = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs, is_training=False, key=None):
# is_training and key allows to defined train/test dependant modules
# like dropout.
del is_training
del key
if discrete_actions:
network = hk.nets.MLP([64, 64, final_layer_size])
else:
network = hk.Sequential([
networks_lib.LayerNormMLP([64, 64], activate_final=True),
networks_lib.NormalTanhDistribution(final_layer_size),
])
return network(obs)
policy = hk.without_apply_rng(hk.transform(_actor_fn))
# Create dummy observations and actions to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
network = networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply)
return network
class BCTest(parameterized.TestCase):
@parameterized.parameters(
('logp',),
('mse',),
('peerbc',)
)
def test_continuous_actions(self, loss_name):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
dataset_demonstration = fakes.transition_dataset(environment)
dataset_demonstration = dataset_demonstration.map(
lambda sample: types.Transition(*sample.data))
dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()
# Construct the agent.
network = make_networks(spec)
if loss_name == 'logp':
loss_fn = bc.logp(
logp_fn=lambda dist_params, actions: dist_params.log_prob(actions))
elif loss_name == 'mse':
loss_fn = bc.mse(
sample_fn=lambda dist_params, key: dist_params.sample(seed=key))
elif loss_name == 'peerbc':
base_loss_fn = bc.logp(
logp_fn=lambda dist_params, actions: dist_params.log_prob(actions))
loss_fn = bc.peerbc(base_loss_fn, zeta=0.1)
else:
raise ValueError
learner = bc.BCLearner(
network=network,
random_key=jax.random.PRNGKey(0),
loss_fn=loss_fn,
optimizer=optax.adam(0.01),
demonstrations=dataset_demonstration,
num_sgd_steps_per_step=num_sgd_steps_per_step)
# Train the agent
for _ in range(num_steps):
learner.step()
@parameterized.parameters(
('logp',),
('rcal',))
def test_discrete_actions(self, loss_name):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
# Create a fake environment to test with.
environment = fakes.DiscreteEnvironment(
num_actions=10, num_observations=100, obs_shape=(10,),
obs_dtype=np.float32)
spec = specs.make_environment_spec(environment)
dataset_demonstration = fakes.transition_dataset(environment)
dataset_demonstration = dataset_demonstration.map(
lambda sample: types.Transition(*sample.data))
dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()
# Construct the agent.
network = make_networks(spec, discrete_actions=True)
def logp_fn(logits, actions):
max_logits = jnp.max(logits, axis=-1, keepdims=True)
logits = logits - max_logits
logits_actions = jnp.sum(
jax.nn.one_hot(actions, spec.actions.num_values) * logits, axis=-1)
log_prob = logits_actions - special.logsumexp(logits, axis=-1)
return log_prob
if loss_name == 'logp':
loss_fn = bc.logp(logp_fn=logp_fn)
elif loss_name == 'rcal':
base_loss_fn = bc.logp(logp_fn=logp_fn)
loss_fn = bc.rcal(base_loss_fn, discount=0.99, alpha=0.1)
else:
raise ValueError
learner = bc.BCLearner(
network=network,
random_key=jax.random.PRNGKey(0),
loss_fn=loss_fn,
optimizer=optax.adam(0.01),
demonstrations=dataset_demonstration,
num_sgd_steps_per_step=num_sgd_steps_per_step)
# Train the agent
for _ in range(num_steps):
learner.step()
if __name__ == '__main__':
absltest.main()
|
woid/apps/services/migrations/0006_auto_20150902_1522.py | emognato/project | 229 | 12760694 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('services', '0005_auto_20150901_1253'),
]
operations = [
migrations.AlterField(
model_name='story',
name='content_type',
field=models.CharField(blank=True, max_length=1, null=True, choices=[(b'T', b'text'), (b'U', b'url'), (b'I', b'image')]),
),
migrations.AlterField(
model_name='story',
name='date',
field=models.DateField(auto_now_add=True),
),
]
|
tools/dia_dll.py | xumoyan/engine | 5,823 | 12760698 | <reponame>xumoyan/engine<filename>tools/dia_dll.py
#!/usr/bin/env python3
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is based on chromium/chromium/main/tools/clang/scripts/update.py.
It is used on Windows platforms to copy the correct msdia*.dll to the
clang folder, as a "gclient hook".
"""
import os
import shutil
import stat
import sys
# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
LLVM_BUILD_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', 'buildtools',
'windows-x64', 'clang'))
def GetDiaDll():
"""Get the location of msdia*.dll for the platform."""
# Bump after VC updates.
DIA_DLL = {
'2013': 'msdia120.dll',
'2015': 'msdia140.dll',
'2017': 'msdia140.dll',
'2019': 'msdia140.dll',
}
# Don't let vs_toolchain overwrite our environment.
environ_bak = os.environ
sys.path.append(os.path.join(THIS_DIR, '..', '..', 'build'))
import vs_toolchain
win_sdk_dir = vs_toolchain.SetEnvironmentAndGetSDKDir()
msvs_version = vs_toolchain.GetVisualStudioVersion()
if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1'))):
dia_path = os.path.join(win_sdk_dir, '..', 'DIA SDK', 'bin', 'amd64')
else:
if 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
vs_path = os.environ['GYP_MSVS_OVERRIDE_PATH']
else:
vs_path = vs_toolchain.DetectVisualStudioPath()
dia_path = os.path.join(vs_path, 'DIA SDK', 'bin', 'amd64')
os.environ = environ_bak
return os.path.join(dia_path, DIA_DLL[msvs_version])
def CopyFile(src, dst):
"""Copy a file from src to dst."""
print("Copying %s to %s" % (str(src), str(dst)))
shutil.copy(src, dst)
def CopyDiaDllTo(target_dir):
# This script always wants to use the 64-bit msdia*.dll.
dia_dll = GetDiaDll()
CopyFile(dia_dll, target_dir)
def main():
CopyDiaDllTo(os.path.join(LLVM_BUILD_DIR, 'bin'))
return 0
if __name__ == '__main__':
sys.exit(main())
|
src/visitpy/examples/matexprs.py | visit-dav/vis | 226 | 12760709 | ###############################################################################
#
# Purpose: Use VisIt CLI to iterate over Curves in a material database and
# compute and plot some common difference curves and output the results
# to either a curve or image file format.
#
# Programmer: <NAME>
# Date: Wed May 27 13:15:07 PDT 2009
#
#
# Modifications:
# <NAME>, Mon Jun 15 17:52:15 PDT 2009
# Removed subclassing used to override behavior of Optparse in presence of
# unrecognized options. By using Argv(), VisIt-specific options never wind
# up getting passed to this script.
###############################################################################
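#
# Example invocation (a sketch only; the database name, output formats and the
# use of "-nowin" are illustrative). The script is meant to be run through
# VisIt's CLI via "-s" so that Argv() below receives only the trailing options:
#
#   visit -cli -nowin -s matexprs.py --image-format png --curve-format curve mydata.visit
#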
import sys, re, os, glob
from optparse import *
#
# Convert '#FFCC13" strings to color tuple
#
def ColorTupleFromHexString(s):
if s[0] != '#':
return (0, 0, 0, 255)
return (int("0x%s"%s[1:3],16), \
int("0x%s"%s[3:5],16), \
int("0x%s"%s[5:7],16), \
255)
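#
# Illustration (hypothetical inputs): the hex string mentioned above decodes as
#   ColorTupleFromHexString("#FFCC13") -> (255, 204, 19, 255)
# and any string not starting with '#' falls back to opaque black:
#   ColorTupleFromHexString("badvalue") -> (0, 0, 0, 255)
#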
#
# Command-line options
#
def BuildCommandLineOptions():
parser = OptionParser()
parser.add_option("--image-width",
help="Set width of images [%default].",
type="int", dest="image_width", default="500", metavar="INT")
parser.add_option("--image-height",
help="Set height of images [%default].",
type="int", dest="image_height", default="500", metavar="INT")
parser.add_option("--data-min",
type="float", dest="data_min", metavar="FLOAT",
help="Mininum data value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--data-max",
type="float", dest="data_max", metavar="FLOAT",
help="Mininum data value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--log-data",
help="Display data (y) axis in log scaling.",
action="store_true", dest="log_data", default=False)
parser.add_option("--x-min",
type="float", dest="x_min", metavar="FLOAT",
help="Mininum positional (x) value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--x-max",
type="float", dest="x_max", metavar="FLOAT",
help="Maximum positional (x) value to be applied to all plots. If no "
"value is specified, the minimum will be allowed to vary "
"as needed from plot to plot.")
parser.add_option("--log-x",
help="Display positional (x) axis in log scaling.",
action="store_true", dest="log_x", default=False)
parser.add_option("--image-format",
help="Set output format for images (e.g. 'tiff', 'png', 'jpeg'). "
"If none specified, no images will be saved.",
dest="image_format", metavar="STRING")
parser.add_option("--curve-format",
help="Set output format for curves (e.g. 'ultra', 'curve'). "
"If none specified, no curve files will be saved.",
dest="curve_format", metavar="STRING")
parser.add_option("--color0",
help="Set color to be used for first curve plot.",
dest="color0", metavar="#RRGGBB")
parser.add_option("--color1",
help="Set color to be used for second curve plot.",
dest="color1", metavar="#RRGGBB")
parser.add_option("--line-width",
help="Set line width for curves.",
type="int", default=0, dest="line_width", metavar="INT")
parser.add_option("--point-density",
help="Plot symbols representing individual points in curves every Nth point. "
"A value of zero turns the display of points off [%default].",
type="int", default=0, dest="point_density", metavar="N")
parser.add_option("--point-size",
help="Size of symbols representing individual points in curve plots.",
type="int", default=5, dest="point_size", metavar="INT")
parser.add_option("--show-legend",
help="Display curve plot legends.",
action="store_true", dest="show_legend", default=False)
parser.add_option("--show-labels",
help="Display curve plot labels.",
action="store_true", dest="show_labels", default=False)
parser.set_usage("matexprs.py [options] dbname")
return parser
#
# Iterate through curves, finding all unique 'dirs' containing curves.
#
def GetVarMap(metadata):
dirMap = {}
for i in range(metadata.GetNumCurves()):
dirinfo = re.search("(.*)/([^/]*)", metadata.GetCurves(i).name)
if dirinfo != None:
dirname = dirinfo.group(1)
varname = dirinfo.group(2)
varMap = {}
if dirname in dirMap:
varMap = dirMap[dirname]
varMap[varname] = 1
dirMap[dirname] = varMap
return dirMap
#
# Begin main program
#
parser = BuildCommandLineOptions()
#
# This bit of logic allows users to get usage/help from
# the command 'python matexpers.py --help'. Without it
# using VisIt's cli the '--help' will get interpreted
# in internallauncher and never make it into this script.
#
if "-h" in sys.argv or \
"--help" in sys.argv or \
"-help" in sys.argv or \
"help" in sys.argv:
parser.print_help()
sys.exit(1)
#
# Argv() is a function defined by VisIt's cli that
# returns ONLY the options after the argument (filename)
# to the '-s' command-line option. In theory, that
# should be only the arguments that this script itself
# should interpret.
#
(clOpts, clArgs) = parser.parse_args(list(Argv()))
#
# Set the name of the database. It is the only 'positional'
# argument on the command line.
#
dbname = ""
if len(clArgs) > 0:
dbname = clArgs[0]
if not glob.glob(dbname):
if dbname == "":
sys.stderr.write("No database specified.\n")
else:
sys.stderr.write("Invalid database, \"%s\", specified.\n"%dbname)
parser.print_usage()
sys.exit(1)
#
# Open the database, get metadata, get info on curve 'dirs'
#
OpenDatabase(dbname)
metadata = GetMetaData(dbname)
dirMap = GetVarMap(metadata)
#
# Build up base save window attributes
#
swa = SaveWindowAttributes()
swa.family = 0
swa.width = clOpts.image_width
swa.height = clOpts.image_height
#
# Build up base curve attributes
#
ca = CurveAttributes()
ca.lineWidth = clOpts.line_width
if clOpts.color0 != None:
ca.color = ColorTupleFromHexString(clOpts.color0)
ca.cycleColors = 0
ca.showLabels = clOpts.show_labels
#if clOpts.point_density > 0:
# ca.showPoints = 1
#ca.pointSize = clOpts.point_size
ca.showLegend = clOpts.show_legend
#ca.symbolDensity = clOpts.point_density
SetDefaultPlotOptions(ca)
#
# Iterate through all curve 'dirs', finding instances where
# all essential variables exist. Create expressions and plot 'em
#
for k in list(dirMap.keys()):
if not ("Ec" in dirMap[k] and \
"cEc" in dirMap[k] and \
"cEc_fit" in dirMap[k]):
print("Ignoring %s because not all required vars are present."%k)
#del dirMap[k]
continue
DefineCurveExpression("%s/c0"%k, "<%s/Ec>-<%s/cEc_fit>"%(k,k))
DefineCurveExpression("%s/c1"%k, "<%s/cEc>-<%s/cEc_fit>"%(k,k))
AddPlot("Curve","%s/c0"%k)
AddPlot("Curve","%s/c1"%k)
DrawPlots()
v = GetViewCurve()
if clOpts.x_min != None:
v.domainCoords = (clOpts.x_min, v.domainCoords[1])
if clOpts.x_max != None:
v.domainCoords = (v.domainCoords[0], clOpts.x_max)
if clOpts.log_x:
v.domainScale = v.LOG
if clOpts.data_min != None:
v.rangeCoords = (clOpts.data_min, v.rangeCoords[1])
if clOpts.data_max != None:
v.rangeCoords = (v.rangeCoords[0], clOpts.data_max)
if clOpts.log_data:
v.rangeScale = v.LOG
SetViewCurve(v)
if clOpts.color1 != None:
ca2 = CurveAttributes()
ca2.color = ColorTupleFromHexString(clOpts.color1)
ca2.cycleColors = 0
SetActivePlots((1,))
SetPlotOptions(ca2)
DrawPlots()
if clOpts.curve_format != None:
swa.format = getattr(swa,clOpts.curve_format.upper())
swa.fileName = k # .curve is added automatically
SetSaveWindowAttributes(swa)
SaveWindow()
if clOpts.image_format != None:
swa.format = getattr(swa,clOpts.image_format.upper())
#swa.fileName = "%s.%s"%(k,clOpts.image_format.lower())
swa.fileName = k
SetSaveWindowAttributes(swa)
SaveWindow()
DeleteAllPlots()
|
tests/test_version.py | grdorin/mopidy | 6,700 | 12760721 | import unittest
from distutils.version import StrictVersion
from mopidy import __version__
class VersionTest(unittest.TestCase):
def test_current_version_is_parsable_as_a_strict_version_number(self):
StrictVersion(__version__)
|
scripts/JustFaceNet.py | nfsergiu/PyOpenPose | 300 | 12760723 | <filename>scripts/JustFaceNet.py
"""
Example script using only the Face detector of Openpose.
"""
import PyOpenPose as OP
import time
import cv2
import numpy as np
import os
OPENPOSE_ROOT = os.environ["OPENPOSE_ROOT"]
def ComputeBB(face, padding=0.4):
minX = np.min(face[:, 0])
minY = np.min(face[:, 1])
maxX = np.max(face[:, 0])
maxY = np.max(face[:, 1])
width = maxX - minX
height = maxY - minY
padX = width * padding / 2
padY = height * padding / 2
minX -= padX
minY -= padY
width += 2 * padX
height += 2 * padY
score = np.mean(face[:, 2])
return score, [int(minX), int(minY), int(width), int(height)]
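def _compute_bb_example():
    # Minimal sanity-check sketch with hypothetical keypoints; this helper is
    # never called by the script itself.
    demo_face = np.array([[100.0, 100.0, 0.9],
                          [150.0, 120.0, 0.9],
                          [130.0, 160.0, 0.9]])
    # With the default 40% padding: width 50 -> pad 10 per side, height 60 ->
    # pad 12 per side, giving roughly (0.9, [90, 88, 70, 84]).
    return ComputeBB(demo_face)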
def run():
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
imgSize = list(frame.shape)
outSize = imgSize[1::-1]
print("Net output size: ", outSize)
download_heatmaps = False
with_hands = False
with_face = True
op = OP.OpenPose((656, 368), (240, 240), tuple(outSize), "COCO", OPENPOSE_ROOT + os.sep + "models" + os.sep, 0,
download_heatmaps, OP.OpenPose.ScaleMode.ZeroToOne, with_face, with_hands)
actual_fps = 0
paused = False
delay = {True: 0, False: 1}
newFaceBB = initFaceBB = faceBB = [240, 120, 150, 150]
print("Entering main Loop. Put your hand into the box to start tracking")
while True:
start_time = time.time()
try:
ret, frame = cap.read()
rgb = frame[:, :outSize[0]]
except Exception as e:
print("Failed to grab", e)
break
t = time.time()
op.detectFace(rgb, np.array(faceBB, dtype=np.int32).reshape((1, 4)))
t = time.time() - t
op_fps = 1.0 / t
res = op.render(rgb)
cv2.putText(res, 'UI FPS = %f, OP-FACE FPS = %f. Press \'r\' to reset.' % (actual_fps, op_fps), (20, 20), 0, 0.5,
(0, 0, 255))
cv2.rectangle(res, (faceBB[0], faceBB[1]), (faceBB[0] + faceBB[2], faceBB[1] + faceBB[3]), [50, 155, 50], 2)
cv2.rectangle(res, (newFaceBB[0], newFaceBB[1]), (newFaceBB[0] + newFaceBB[2], newFaceBB[1] + newFaceBB[3]),
[250, 55, 50], 1)
cv2.imshow("OpenPose result", res)
face = op.getKeypoints(op.KeypointType.FACE)[0].reshape(-1, 3)
score, newFaceBB = ComputeBB(face)
print("Res Score, faceBB: ", score, newFaceBB)
if score > 0.5: # update BB only when score is good.
faceBB = newFaceBB
key = cv2.waitKey(delay[paused])
if key & 255 == ord('p'):
paused = not paused
if key & 255 == ord('q'):
break
if key & 255 == ord('r'):
faceBB = initFaceBB
actual_fps = 1.0 / (time.time() - start_time)
if __name__ == '__main__':
run()
|
model-optimizer/unit_tests/extensions/front/kaldi/tdnn_component_replacer_test.py | monroid/openvino | 2,406 | 12760762 | <filename>model-optimizer/unit_tests/extensions/front/kaldi/tdnn_component_replacer_test.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from generator import generator, generate
from extensions.front.kaldi.tdnn_component_replacer import TdnnComponentReplacer
from mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, regular_op, result, connect_front, const
@generator
class TdnnComponentReplacerTest(unittest.TestCase):
@generate(*[
([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1],),
([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1, 2, 10, 1000],),
([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 0]),
])
def test_tdnnreplacer(self, weights, biases, time_offsets):
def generate_offsets():
offset_edges = []
offset_nodes = {}
for i, t in enumerate(time_offsets):
offset_nodes.update(**regular_op('memoryoffset_' + str(i), {'type': None}))
if t != 0:
offset_edges.append(('placeholder', 'memoryoffset_' + str(i), {'out': 0, 'in': 0}))
offset_edges.append(('memoryoffset_' + str(i), 'concat', {'out': 0, 'in': i}))
else:
offset_edges.append(('placeholder', 'concat', {'out': 0, 'in': i}))
return offset_nodes, offset_edges
offset_nodes, ref_offset_edges = generate_offsets()
nodes = {
**offset_nodes,
**regular_op('placeholder', {'type': 'Parameter'}),
**regular_op('tdnncomponent', {'op': 'tdnncomponent',
'weights': np.array(weights),
'biases': np.array(biases),
'time_offsets': np.array(time_offsets)}),
**const('weights', np.array(weights)),
**const('biases', np.array(biases)),
**regular_op('concat', {'type': 'Concat', 'axis': 1}),
**regular_op('memoryoffset_0', {'type': None}),
**regular_op('memoryoffset_1', {'type': None}),
**regular_op('memoryoffset_2', {'type': None}),
**regular_op('fully_connected', {'type': 'FullyConnected'}),
**result('result'),
}
graph = build_graph(nodes, [
*connect_front('placeholder', 'tdnncomponent'),
*connect_front('tdnncomponent', 'result')
], nodes_with_edges_only=True)
graph.stage = 'front'
ref_graph = build_graph(nodes, [
*ref_offset_edges,
*connect_front('concat', '0:fully_connected'),
*connect_front('weights', '1:fully_connected'),
*connect_front('biases', '2:fully_connected'),
*connect_front('fully_connected', 'result')
], nodes_with_edges_only=True)
TdnnComponentReplacer().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
|
tests/test_topn_precision.py | keener101/lkpy | 210 | 12760787 | import numpy as np
import pandas as pd
from pytest import approx
from lenskit.topn import precision
from lenskit.util.test import demo_recs
from lenskit import topn
def _test_prec(items, rel, **k):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return precision(recs, truth, **k)
def test_precision_empty_none():
prec = _test_prec([], [1, 3])
assert prec is None
def test_precision_simple_cases():
prec = _test_prec([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1, 2, 3, 4], [1, 3])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_prec([1, 2, 3, 4], range(4, 10))
assert prec == approx(0.25)
def test_precision_series():
prec = _test_prec(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_set():
prec = _test_prec(pd.Series([1, 2, 3, 4]), [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), range(4, 10))
assert prec == approx(0.25)
def test_precision_series_index():
prec = _test_prec(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_array():
prec = _test_prec(pd.Series([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), np.array([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
assert prec == approx(0.25)
def test_precision_array():
prec = _test_prec(np.array([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(np.array([1, 2, 3, 4]), np.array([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(np.array([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
assert prec == approx(0.25)
def test_prec_long_rel():
rel = np.arange(100)
items = [1, 0, 150, 3, 10]
r = _test_prec(items, rel, k=5)
assert r == approx(0.8)
def test_prec_long_items():
rel = np.arange(100)
items = [1, 0, 150, 3, 10, 30, 120, 4, 17]
r = _test_prec(items, rel, k=5)
assert r == approx(0.8)
def test_prec_short_items():
rel = np.arange(100)
items = [1, 0, 150]
r = _test_prec(items, rel, k=5)
assert r == approx(2 / 3)
def test_recall_bulk_k(demo_recs):
"bulk and normal match"
train, test, recs = demo_recs
assert test['user'].value_counts().max() > 5
rla = topn.RecListAnalysis()
rla.add_metric(precision, name='pk', k=5)
rla.add_metric(precision)
# metric without the bulk capabilities
rla.add_metric(lambda *a, **k: precision(*a, **k), name='ind_pk', k=5)
rla.add_metric(lambda *a: precision(*a), name='ind_p')
res = rla.compute(recs, test)
assert res.precision.values == approx(res.ind_p.values)
assert res.pk.values == approx(res.ind_pk.values)
|
tools/moduletests/unit/test_selinuxpermissive.py | stivesso/aws-ec2rescue-linux | 178 | 12760799 | <filename>tools/moduletests/unit/test_selinuxpermissive.py
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the selinuxpermissive module
"""
import os
import sys
import unittest
import mock
import moduletests.src.selinuxpermissive
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class Testselinuxpermissive(unittest.TestCase):
config_file_path = "/etc/selinux/config"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
def test_detect_no_selinux(self, isfile_mock):
self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
def test_detect_problem(self, isfile_mock):
self.assertTrue(moduletests.src.selinuxpermissive.detect(self.config_file_path))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=permissive"))
def test_detect_noproblem(self, isfile_mock):
self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
def test_fix_success(self):
self.assertTrue(moduletests.src.selinuxpermissive.fix(self.config_file_path))
@mock.patch("moduletests.src.selinuxpermissive.open", side_effect=IOError)
def test_fix_exception(self, open_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.selinuxpermissive.fix, self.config_file_path)
self.assertEqual(self.output.getvalue(), "[WARN] Unable to replace contents of /etc/selinux/config\n")
self.assertTrue(open_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=(True, False))
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
def test_run_success_fixed(self, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.selinuxpermissive.run())
self.assertTrue("[SUCCESS] selinux set to permissive" in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.detect", return_value=False)
def test_run_success(self, detect_mock, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.selinuxpermissive.run())
self.assertTrue("[SUCCESS] selinux is not set to enforcing" in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
def test_run_failure_isfile(self,
restore_mock,
fix_mock,
backup_mock,
isfile_mock,
detect_mock,
config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
self.assertTrue(restore_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
@mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=IOError)
@mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
def test_run_failure_exception(self, restore_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
self.assertTrue(restore_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.selinuxpermissive.get_config_dict", side_effect=IOError)
def test_run_failure_config_exception(self, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.selinuxpermissive.run())
self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
self.assertTrue(config_mock.called)
|
tencentcloud/facefusion/v20181201/facefusion_client.py | PlasticMem/tencentcloud-sdk-python | 465 | 12760862 | <filename>tencentcloud/facefusion/v20181201/facefusion_client.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.facefusion.v20181201 import models
class FacefusionClient(AbstractClient):
_apiVersion = '2018-12-01'
_endpoint = 'facefusion.tencentcloudapi.com'
_service = 'facefusion'
def DescribeMaterialList(self, request):
"""通常通过腾讯云人脸融合的控制台可以查看到素材相关的参数数据,可以满足使用。本接口返回活动的素材数据,包括素材状态等。用于用户通过Api查看素材相关数据,方便使用。
:param request: Request instance for DescribeMaterialList.
:type request: :class:`tencentcloud.facefusion.v20181201.models.DescribeMaterialListRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.DescribeMaterialListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeMaterialList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMaterialListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def FaceFusion(self, request):
"""本接口用于人脸融合,用户上传人脸图片,获取与模板融合后的人脸图片。未发布的活动请求频率限制为1次/秒,已发布的活动请求频率限制50次/秒。如有需要提高活动的请求频率限制,请在控制台中申请。
>
- 公共参数中的签名方式必须指定为V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for FaceFusion.
:type request: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionResponse`
"""
try:
params = request._serialize()
body = self.call("FaceFusion", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.FaceFusionResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def FaceFusionLite(self, request):
"""人脸融合活动专用版,不推荐使用。人脸融合接口建议使用[人脸融合](https://cloud.tencent.com/document/product/670/31061)或[选脸融合](https://cloud.tencent.com/document/product/670/37736)接口
:param request: Request instance for FaceFusionLite.
:type request: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionLiteRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionLiteResponse`
"""
try:
params = request._serialize()
body = self.call("FaceFusionLite", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.FaceFusionLiteResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def FuseFace(self, request):
"""本接口用于单脸、多脸融合,用户上传人脸图片,获取与模板融合后的人脸图片。查看 <a href="https://cloud.tencent.com/document/product/670/38247" target="_blank">选脸融合接入指引</a>。
未发布的活动请求频率限制为1次/秒,已发布的活动请求频率限制50次/秒。如有需要提高活动的请求频率限制,请在控制台中申请。
>
- 公共参数中的签名方式必须指定为V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for FuseFace.
:type request: :class:`tencentcloud.facefusion.v20181201.models.FuseFaceRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.FuseFaceResponse`
"""
try:
params = request._serialize()
body = self.call("FuseFace", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.FuseFaceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message) |
src/robotide/lib/robot/running/randomizer.py | ludovicurbain/SWIFT-RIDE | 775 | 12760886 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import Random
from robotide.lib.robot.model import SuiteVisitor
class Randomizer(SuiteVisitor):
def __init__(self, randomize_suites=True, randomize_tests=True, seed=None):
self.randomize_suites = randomize_suites
self.randomize_tests = randomize_tests
self.seed = seed
# Cannot use just Random(seed) due to
# https://ironpython.codeplex.com/workitem/35155
args = (seed,) if seed is not None else ()
self._shuffle = Random(*args).shuffle
def start_suite(self, suite):
if not self.randomize_suites and not self.randomize_tests:
return False
if self.randomize_suites:
self._shuffle(suite.suites)
if self.randomize_tests:
self._shuffle(suite.tests)
if not suite.parent:
suite.metadata['Randomized'] = self._get_message()
def _get_message(self):
possibilities = {(True, True): 'Suites and tests',
(True, False): 'Suites',
(False, True): 'Tests'}
randomized = (self.randomize_suites, self.randomize_tests)
return '%s (seed %s)' % (possibilities[randomized], self.seed)
def visit_test(self, test):
pass
def visit_keyword(self, kw):
pass
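# Usage sketch (assumes `suite` is a robot.model.TestSuite-like object that
# accepts SuiteVisitor instances through its visit() method); a fixed seed
# keeps the shuffled order reproducible:
#
#   suite.visit(Randomizer(randomize_suites=True, randomize_tests=True, seed=1234))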
|
face_alignment/detection/dlib/dlib_detector.py | NovemberJoy/VTuber_Unity | 669 | 12760928 | <filename>face_alignment/detection/dlib/dlib_detector.py
import os
import cv2
import dlib
try:
import urllib.request as request_file
except BaseException:
import urllib as request_file
from ..core import FaceDetector
from ...utils import appdata_dir
class DlibDetector(FaceDetector):
def __init__(self, device, path_to_detector=None, verbose=False):
super().__init__(device, verbose)
base_path = os.path.join(appdata_dir('face_alignment'), "data")
# Initialise the face detector
if 'cuda' in device:
if path_to_detector is None:
path_to_detector = os.path.join(
base_path, "mmod_human_face_detector.dat")
if not os.path.isfile(path_to_detector):
print("Downloading the face detection CNN. Please wait...")
path_to_temp_detector = os.path.join(
base_path, "mmod_human_face_detector.dat.download")
if os.path.isfile(path_to_temp_detector):
os.remove(os.path.join(path_to_temp_detector))
request_file.urlretrieve(
"https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat",
os.path.join(path_to_temp_detector))
os.rename(os.path.join(path_to_temp_detector), os.path.join(path_to_detector))
self.face_detector = dlib.cnn_face_detection_model_v1(path_to_detector)
else:
self.face_detector = dlib.get_frontal_face_detector()
def detect_from_image(self, tensor_or_path):
image = self.tensor_or_path_to_ndarray(tensor_or_path, rgb=False)
detected_faces = self.face_detector(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
if 'cuda' not in self.device:
detected_faces = [[d.left(), d.top(), d.right(), d.bottom()] for d in detected_faces]
else:
detected_faces = [[d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()] for d in detected_faces]
return detected_faces
@property
def reference_scale(self):
return 195
@property
def reference_x_shift(self):
return 0
@property
def reference_y_shift(self):
return 0
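# Usage sketch (hypothetical image path):
#
#   detector = DlibDetector('cpu')
#   boxes = detector.detect_from_image('/path/to/image.jpg')
#   # -> list of [left, top, right, bottom] boxes, as assembled above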
|
L1Trigger/GlobalTriggerAnalyzer/python/l1GtPatternGenerator_cfi.py | ckamtsikis/cmssw | 852 | 12760976 | <filename>L1Trigger/GlobalTriggerAnalyzer/python/l1GtPatternGenerator_cfi.py
import FWCore.ParameterSet.Config as cms
l1GtPatternGenerator = cms.EDAnalyzer("L1GtPatternGenerator",
# input tags for various records
GtInputTag = cms.InputTag("gtDigis"),
GmtInputTag = cms.InputTag("gmtDigis"),
GctInputTag = cms.InputTag("gctDigis"),
CscInputTag = cms.InputTag("gtDigis", "CSC"),
DtInputTag = cms.InputTag("gtDigis", "DT"),
RpcbInputTag = cms.InputTag("gtDigis", "RPCb"),
RpcfInputTag = cms.InputTag("gtDigis", "RPCf"),
# file name
PatternFileName = cms.string("GT_GMT_patterns.txt"),
# bunch crossing numbers to write
bx = cms.vint32(0),
# header
PatternFileHeader = cms.string(
"""#GT_GMT_patterns_VD
#
# editors - HB 220606
#
# remarks:
# values in this template are for version VD (same as VB) for the cond-chips of GTL9U (from IVAN)
#
# syntax:
# character "#" indicates a comment line
# header line 1 => hardware of sim- and spy-memories
# header line 2 => hardware location (FPGA-chip) of sim-memories
# header line 3 => channel number of sim-memories (PSB)
# header line 4 => hardware location (FPGA-chip) of spy-memories
# header line 5 => name of patterns
# header line 6 => number of objects (calos, muons) or other declarations
# (header line 7 => only graphics)
# (header line 8 => only text and graphics)
# header line 9 => number of columns, starting with 0
#
# patterns:
# values in column 0 are event numbers (decimal), starting with 0 (synchronisation data)
# patterns for 1024 events (memories of cond-chips on GTL9U can contain only 1024 events) are in this file
# values in columns 1-119 are the hexadecimal patterns, the rightmost digit in a string is LSB
#
# header:
# e |<--------------------------------------------------------------------------PSB/GTL9U(REC)------------------------------------------------------------------------------------------------------------->|<--------------------------------------------------------------------------PSB/GMT(AUF,AUB)--------------------------------------------------------------------------------------------------------------------------------------------------->|<----------------------------------------------------------------GMT REGIONAL MUONs----------------------------------------------------------->|<----GMT(SORT)/GTL9U(REC)----->|<--------------GTL9U(COND)/FDL(ALGO)---------------->|<-----------FDL----------->|
# v |PSB slot13/ch6+7 |PSB slot13/ch4+5 |PSB slot13/ch2+3 |PSB slot13/ch0+1 |PSB slot14/ch6+7 |PSB slot14/ch4+5 |PSB slot14/ch2+3 |PSB slot14/ch0+1 |PSB slot15/ch2+3 |PSB slot15/ch0+1 |PSB slot19/ch6+7 |PSB slot19/ch4+5 |PSB slot19/ch2+3 |PSB slot19/ch0+1 |PSB slot20/ch6+7 |PSB slot20/ch4+5 |PSB slot20/ch2+3 |PSB slot20/ch0+1 |PSB slot21/ch6+7 |PSB slot21/ch4+5 |PSB slot21/ch2+3 |PSB slot21/ch0+1 |GMT INF |GMT INC |GMT IND |GMT INB |GMT SORT |COND1 |COND2 |PSB slot9/ch0+1 |FINOR |
# e |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 | | | | | | | |ch0 ch1 ch0 ch1 | |
# n |GTL9U REC1 |GTL9U REC1 |GTL9U REC2 |GTL9U REC2 |GTL9U REC2 |GTL9U REC2 |GTL9U REC3 |GTL9U REC3 |GTL9U REC3 |GTL9U REC3 |GMT AUF |GMT AUF |GMT AUB |GMT AUB |GMT AUF |GMT AUF |GMT AUB |GMT AUB |GMT AUF |GMT AUF |GMT AUB |GMT AUB | | | | |GTL9U REC1 |FDL ALGO |FDL ALGO |FDL ALGO | |
# t |calo1 (ieg) |calo2 (eg) |calo3 (jet) |calo4 (fwdjet) |calo5 (tau) |calo6 (esums) |calo7 (hfbc/etsums)|calo8 (free) |calo9 (totem) |calo10 (free) |MQF4 |MQF3 |MQB2 |MQB1 |MQF8 |MQF7 |MQB6 |MQB5 |MQF12 |MQF11 |MQB10 |MQB9 |RPC forward |CSC |DT |RPC barrel |muon (sorted four) |algo |algo |techtrigger | |
# | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 |45M 45Q 6M 6Q |45M 45Q 6M 6Q |01M 01Q 23M 23Q |01M 01Q 23M 23Q |45M 45Q 6M 6Q |45M 45Q 6M 6Q |01M 01Q 23M 23Q |01M 01Q 23M 23Q |45M 45Q 6M 6Q |45M 45Q 6M 6Q |01M 01Q 23M 23Q |01M 01Q 23M 23Q | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 |191--160 159--128 127---96|95----64 63----32 31-----0|15-0 47-32 31-16 63-48| |
# | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
# columns: | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
# 0 | 1 2 3 4 | 5 6 7 8 | 9 10 11 12 | 13 14 15 16 | 17 18 19 20 | 21 22 23 24 | 25 26 27 28 | 29 30 31 32 | 33 34 35 36 | 37 38 39 40 | 41 42 43 44 | 45 46 47 48 | 49 50 51 52 | 53 54 55 56 | 57 58 59 60 | 61 62 63 64 | 65 66 67 68 | 69 70 71 72 | 73 74 75 76 | 77 78 79 80 | 81 82 83 84 | 85 86 87 88 | 89 90 91 92 | 93 94 95 96 | 97 98 99 100 | 101 102 103 104 | 105 106 107 108 | 109 110 111 | 112 113 114 | 115 116 117 118|119 |
"""),
# footer
PatternFileFooter = cms.string(""),
# A vector of column names to be written for each pattern file line
PatternFileColumns = cms.vstring(),
# A vector of the lengths (in bits!) of each column
PatternFileLengths = cms.vuint32(),
# A vector of default values for each column
PatternFileDefaultValues = cms.vuint32(),
# By default, do not add comments with detailed information
DebugOutput = cms.bool(False)
)
def addBlock(analyzer, name, count, length, default):
for i in range(1,count+1):
analyzer.PatternFileColumns.append("%s%d" % (name, i))
analyzer.PatternFileLengths.append(length)
analyzer.PatternFileDefaultValues.append(default)
def addPSB(analyzer, name):
addBlock(analyzer, name, 4, 16, 0)
def addRegionalMuons(analyzer, name):
# regional muons are different - they need to have a default of 0x0000ff00 when
# empty to make input cable disconnects recognizable
addBlock(analyzer, name, 4, 32, 0x0000ff00)
def addGMTMuons(analyzer, name):
addBlock(analyzer, name, 4, 26, 0)
# set up format:
fields = l1GtPatternGenerator.PatternFileColumns
lengths = l1GtPatternGenerator.PatternFileLengths
defaults = l1GtPatternGenerator.PatternFileDefaultValues
# column 1..20: some fairly standard PSBs (calo1 - calo5)
for name in [ "gctIsoEm", "gctEm", "cenJet", "forJet", "tauJet" ]:
addPSB(l1GtPatternGenerator, name)
# then the energy sums, which are slightly more complicated
# (calo6)
fields += ["etTotal1", "etMiss1", "etHad1", "etMissPhi1"]
lengths += [ 16, 16, 16, 16]
defaults += [ 0, 0, 0, 0]
# HF bit counts / etsums (which are mangled in the C++ code)
# (calo7)
fields += [ "hfPsbValue1_l", "htMiss1", "hfPsbValue1_h", "unknown"]
lengths += [ 16, 16, 16, 16]
defaults += [ 0, 0, 0, 0]
# calo8 - free
addPSB(l1GtPatternGenerator, "unknown")
# calo9 - "totem", currently
addPSB(l1GtPatternGenerator, "unknown")
# calo 10
# BPTX/Castor and TBD data - default to 0xffff to get BPTX triggers matching GT emulator
addBlock(l1GtPatternGenerator, "unknown", 4, 16, 0xffff)
# 12 more PSBs we don't fill
for i in range(12):
addPSB(l1GtPatternGenerator, "unknown")
# regional muons
addRegionalMuons(l1GtPatternGenerator, "fwdMuon")
addRegionalMuons(l1GtPatternGenerator, "cscMuon")
addRegionalMuons(l1GtPatternGenerator, "dtMuon")
addRegionalMuons(l1GtPatternGenerator, "brlMuon")
# global muons
addGMTMuons(l1GtPatternGenerator, "gmtMuon")
# GT stuff
addBlock(l1GtPatternGenerator, "gtDecisionExt", 2, 32, 0)
addBlock(l1GtPatternGenerator, "gtDecision", 4, 32, 0)
# tech triggers: a bit complicated, since we like to mix up
# half-words (see header)
fields += ["gtTechTrigger1_l", "gtTechTrigger2_l", "gtTechTrigger1_h", "gtTechTrigger2_h"]
lengths += [ 16, 16, 16, 16]
defaults += [ 0, 0, 0, 0]
fields += ["gtFinalOr"]
lengths += [ 9]
defaults += [ 0]
# just to make sure the python magic adds up to the proper output format
if len(fields) != 119:
raise ValueError("Expecting 119 data fields (120 - event number) in pattern file format, got %d!" % len(fields) )
# For debugging: Get an overview of your pattern file format
#print fields
#print lengths
#print defaults
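# Worked example of the helpers above: addPSB(l1GtPatternGenerator, "gctIsoEm")
# calls addBlock(..., count=4, length=16, default=0), appending the column
# names "gctIsoEm1" ... "gctIsoEm4", each 16 bits wide with default 0, to the
# parallel PatternFileColumns/PatternFileLengths/PatternFileDefaultValues vectors.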
|
scripts/plain_models/cifar_resnet/train_pytorch.py | microsoft/archai | 344 | 12760982 | <reponame>microsoft/archai
import argparse
import math
from typing import List, Mapping, Optional, Tuple, Any
import os
import logging
import numpy as np
import time
import torch
from torch import nn
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import yaml
from archai.common import utils
from archai import cifar10_models
def train(epochs, train_dl, val_dal, net, device, crit, optim,
sched, sched_on_epoch, half, quiet, grad_clip:float) -> List[Mapping]:
train_acc, test_acc = 0.0, 0.0
metrics = []
for epoch in range(epochs):
lr = optim.param_groups[0]['lr']
train_acc, loss = train_epoch(epoch, net, train_dl, device, crit, optim,
sched, sched_on_epoch, half, grad_clip)
val_acc = test(net, val_dal, device,
half) if val_dal is not None else math.nan
metrics.append({'val_top1': val_acc, 'train_top1': train_acc, 'lr': lr,
'epoch': epoch, 'train_loss': loss})
if not quiet:
logging.info(f'train_epoch={epoch}, val_top1={val_acc},'
f' train_top1={train_acc}, lr={lr:.4g}')
return metrics
def optim_sched_orig(net, epochs):
lr, momentum, weight_decay = 0.1, 0.9, 1.0e-4
optim = torch.optim.SGD(net.parameters(),
lr, momentum=momentum, weight_decay=weight_decay)
logging.info(f'lr={lr}, momentum={momentum}, weight_decay={weight_decay}')
sched = torch.optim.lr_scheduler.MultiStepLR(optim,
milestones=[100, 150, 200, 400, 600]) # resnet original paper
sched_on_epoch = True
logging.info(f'sched_on_epoch={sched_on_epoch}, sched={str(sched)}')
return optim, sched, sched_on_epoch
def optim_sched_cosine(net, epochs):
lr, momentum, weight_decay = 0.025, 0.9, 1.0e-4
optim = torch.optim.SGD(net.parameters(),
lr, momentum=momentum, weight_decay=weight_decay)
logging.info(f'lr={lr}, momentum={momentum}, weight_decay={weight_decay}')
sched = torch.optim.lr_scheduler.CosineAnnealingLR(optim, epochs)
sched_on_epoch = True
logging.info(f'sched_on_epoch={sched_on_epoch}, sched={str(sched)}')
return optim, sched, sched_on_epoch
def get_data(datadir: str, train_batch_size=128, test_batch_size=4096,
cutout=0, train_num_workers=-1, test_num_workers=-1,
val_percent=10.0)\
-> Tuple[DataLoader, Optional[DataLoader], DataLoader]:
if utils.is_debugging():
train_num_workers = test_num_workers = 0
logging.info('debugger=true, num_workers=0')
if train_num_workers <= -1:
train_num_workers = torch.cuda.device_count()*4
if test_num_workers <= -1:
test_num_workers = torch.cuda.device_count()*4
train_transform = cifar10_transform(aug=True, cutout=cutout)
trainset = torchvision.datasets.CIFAR10(root=datadir, train=True,
download=True, transform=train_transform)
val_len = int(len(trainset) * val_percent / 100.0)
train_len = len(trainset) - val_len
valset = None
if val_len:
trainset, valset = torch.utils.data.random_split(
trainset, [train_len, val_len])
train_dl = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
shuffle=True, num_workers=train_num_workers, pin_memory=True)
if valset is not None:
val_dl = torch.utils.data.DataLoader(valset, batch_size=test_batch_size,
shuffle=False, num_workers=test_num_workers, pin_memory=True)
else:
val_dl = None
test_transform = cifar10_transform(aug=False, cutout=0)
testset = torchvision.datasets.CIFAR10(root=datadir, train=False,
download=True, transform=test_transform)
test_dl = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
shuffle=False, num_workers=test_num_workers, pin_memory=True)
logging.info(
f'train_len={train_len}, val_len={val_len}, test_len={len(testset)}')
return train_dl, val_dl, test_dl
def train_epoch(epoch, net, train_dl, device, crit, optim,
sched, sched_on_epoch, half, grad_clip:float) -> Tuple[float, float]:
correct, total, loss_total = 0, 0, 0.0
net.train()
for batch_idx, (inputs, targets) in enumerate(train_dl):
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if half:
inputs = inputs.half()
outputs, loss = train_step(net, crit, optim, sched, sched_on_epoch,
inputs, targets, grad_clip)
loss_total += loss
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
if sched and sched_on_epoch:
sched.step()
return 100.0*correct/total, loss_total
def train_step(net: nn.Module,
crit: _Loss, optim: Optimizer, sched: _LRScheduler, sched_on_epoch: bool,
inputs: torch.Tensor, targets: torch.Tensor, grad_clip:float) -> Tuple[torch.Tensor, float]:
outputs = net(inputs)
loss = crit(outputs, targets)
optim.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(net.parameters(), grad_clip)
optim.step()
if sched and not sched_on_epoch:
sched.step()
return outputs, loss.item()
def test(net, test_dl, device, half) -> float:
correct, total = 0, 0
net.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_dl):
inputs = inputs.to(device, non_blocking=False)
targets = targets.to(device)
if half:
inputs = inputs.half()
outputs = net(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return 100.0*correct/total
def param_size(model: torch.nn.Module) -> int:
"""count all parameters excluding auxiliary"""
return sum(v.numel() for name, v in model.named_parameters()
if "auxiliary" not in name)
def cifar10_transform(aug: bool, cutout=0):
MEAN = [0.49139968, 0.48215827, 0.44653124]
STD = [0.24703233, 0.24348505, 0.26158768]
transf = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
if aug:
aug_transf = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()
]
transf = aug_transf + transf
if cutout > 0: # must be after normalization
transf += [CutoutDefault(cutout)]
return transforms.Compose(transf)
class CutoutDefault:
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def log_metrics(expdir: str, filename: str, metrics, test_acc: float, args) -> None:
    print(f'{filename}:', f'test_acc: {test_acc}', metrics[-1])
results = [
('test_acc', test_acc),
('val_acc', metrics[-1]['val_top1']),
('epochs', args.epochs),
('train_batch_size', args.train_batch_size),
('test_batch_size', args.test_batch_size),
('model_name', args.model_name),
('exp_name', args.experiment_name),
('exp_desc', args.experiment_description),
('seed', args.seed),
('devices', utils.cuda_device_names()),
('half', args.half),
('cutout', args.cutout),
('train_acc', metrics[-1]['train_top1']),
('loader_workers', args.loader_workers),
('date', str(time.time())),
]
utils.append_csv_file(os.path.join(expdir, f'{filename}.tsv'), results)
with open(os.path.join(expdir, f'{filename}.yaml'), 'w') as f:
yaml.dump(metrics, f)
def create_crit(device, half):
crit = nn.CrossEntropyLoss().to(device)
if half:
crit.half()
return crit
def create_model(model_name, device, half) -> nn.Module:
model_class = getattr(cifar10_models, model_name)
net = model_class()
logging.info(f'param_size_m={param_size(net):.1e}')
net = net.to(device)
if half:
net.half()
return net
def main():
parser = argparse.ArgumentParser(description='Pytorch cifar training')
parser.add_argument('--experiment-name', '-n', default='train_pytorch')
parser.add_argument('--experiment-description', '-d',
                        default='Train cifar using pure PyTorch code')
parser.add_argument('--epochs', '-e', type=int, default=1)
parser.add_argument('--model-name', '-m', default='resnet34')
parser.add_argument('--device', default='',
help='"cuda" or "cpu" or "" in which case use cuda if available')
parser.add_argument('--train-batch-size', '-b', type=int, default=128)
parser.add_argument('--test-batch-size', type=int, default=4096)
parser.add_argument('--seed', '-s', type=float, default=42)
parser.add_argument('--half', type=lambda x: x.lower() == 'true',
nargs='?', const=True, default=False)
parser.add_argument('--cutout', type=int, default=0)
parser.add_argument('--grad-clip', type=float, default=5.0)
parser.add_argument('--datadir', default='',
help='where to find dataset files, default is ~/torchvision_data_dir')
parser.add_argument('--outdir', default='',
help='where to put results, default is ~/logdir')
parser.add_argument('--loader-workers', type=int, default=-1,
help='number of thread/workers for data loader (-1 means auto)')
args = parser.parse_args()
if not args.datadir:
args.datadir = os.environ.get('PT_DATA_DIR', '') or '~/dataroot'
if not args.outdir:
args.outdir = os.environ.get('PT_OUTPUT_DIR', '')
if not args.outdir:
args.outdir = os.path.join(
'~/logdir', 'cifar_testbed', args.experiment_name)
expdir = utils.full_path(args.outdir)
os.makedirs(expdir, exist_ok=True)
utils.setup_cuda(args.seed)
datadir = utils.full_path(args.datadir)
os.makedirs(datadir, exist_ok=True)
utils.create_logger(filepath=os.path.join(expdir, 'logs.log'))
# log config for reference
logging.info(
f'exp_name="{args.experiment_name}", exp_desc="{args.experiment_description}"')
logging.info(
f'model_name="{args.model_name}", seed={args.seed}, epochs={args.epochs}')
logging.info(f'half={args.half}, cutout={args.cutout}')
logging.info(f'datadir="{datadir}"')
logging.info(f'expdir="{expdir}"')
logging.info(f'train_batch_size={args.train_batch_size}')
if args.device:
device = torch.device(args.device)
else:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# load data just before train start so any errors so far is not delayed
train_dl, val_dl, test_dl = get_data(datadir=datadir,
train_batch_size=args.train_batch_size, test_batch_size=args.test_batch_size,
train_num_workers=args.loader_workers, test_num_workers=args.loader_workers,
cutout=args.cutout)
epochs = args.epochs
net = create_model(args.model_name, device, args.half)
crit = create_crit(device, args.half)
optim, sched, sched_on_epoch = optim_sched_orig(net, epochs)
train_metrics = train(epochs, train_dl, val_dl, net, device, crit, optim,
sched, sched_on_epoch, args.half, False, grad_clip=args.grad_clip)
test_acc = test(net, test_dl, device, args.half)
log_metrics(expdir, 'train_metrics', train_metrics, test_acc, args)
if __name__ == '__main__':
main()
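# Example invocation (illustrative values; paths fall back to the defaults
# resolved in main() when omitted):
#
#   python train_pytorch.py --model-name resnet34 --epochs 200 \
#       --datadir ~/dataroot --outdir ~/logdir/cifar_testbed/train_pytorch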
|