hexsha (string, len 40) | size (int64, 5-2.06M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-248) | max_stars_repo_name (string, len 5-125) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-248) | max_issues_repo_name (string, len 5-125) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-248) | max_forks_repo_name (string, len 5-125) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 5-2.06M) | avg_line_length (float64, 1-1.02M) | max_line_length (int64, 3-1.03M) | alphanum_fraction (float64, 0-1) | count_classes (int64, 0-1.6M) | score_classes (float64, 0-1) | count_generators (int64, 0-651k) | score_generators (float64, 0-1) | count_decorators (int64, 0-990k) | score_decorators (float64, 0-1) | count_async_functions (int64, 0-235k) | score_async_functions (float64, 0-1) | count_documentation (int64, 0-1.04M) | score_documentation (float64, 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7db33bbd439b5c7268b5e21a4ea3bb5bcb4b092b | 3,148 | py | Python | libvirt_vm_optimizer/util/arg_parser.py | atiratree/libvirt-vm-optimizer | a022391ea86e3609e3b9c01fc2b84279939a26ab | [
"MIT"
]
| 1 | 2019-01-16T18:59:59.000Z | 2019-01-16T18:59:59.000Z | libvirt_vm_optimizer/util/arg_parser.py | suomiy/libvirt-vm-optimizer | a022391ea86e3609e3b9c01fc2b84279939a26ab | [
"MIT"
]
| null | null | null | libvirt_vm_optimizer/util/arg_parser.py | suomiy/libvirt-vm-optimizer | a022391ea86e3609e3b9c01fc2b84279939a26ab | [
"MIT"
]
| 1 | 2021-05-04T00:06:12.000Z | 2021-05-04T00:06:12.000Z |
import argparse
from argparse import ArgumentError
from libvirt_vm_optimizer.util.utils import Profile
class Settings:
def __init__(self, libvirt_xml=None,
output_xml=None,
in_place=False,
profile=Profile.DEFAULT,
force_multithreaded_pinning=False,
connection_uri=None):
self.libvirt_xml = libvirt_xml
self.output_xml = output_xml
self.profile = profile
self.in_place = in_place
self.connection_uri = connection_uri
self.force_multithreaded_pinning = force_multithreaded_pinning
class ArgParser:
@staticmethod
def require_args():
parser = argparse.ArgumentParser(usage='libvirt-vm-optimizer.py [LIBVIRT_XML]\n'
'\n'
' - optimizes LIBVIRT_XML (supports kvm|qemu)')
parser.add_argument('LIBVIRT_XML', nargs='?',
help=f'VM libvirt.xml (will read from stdin if not specified)')
parser.add_argument('-o', '--output', type=str, nargs='?',
dest='output',
required=False, const=True,
help=f'output file (will be printed to stdout if not specified)')
parser.add_argument('-i', '--in-place', action='store_true',
dest='in_place',
help=f'edit files in place')
parser.add_argument('-p', '--profile', type=str, nargs='?',
dest='profile',
default='default',
required=False, const=True,
help=f'one of (default, cpu, server )')
parser.add_argument('-m', '--force-multithreaded-pinning', action='store_true',
dest='multithreaded_pinning',
help=f'setup CPU pinning in simultaneous multithreading systems (experimental and may be slower)')
parser.add_argument('-c', '--connect', type=str, nargs='?',
dest='uri',
default='qemu:///system',
required=False, const=True,
help=f'connection URI (uses default connection if not specified)')
args = parser.parse_args()
return ArgParser._as_settings(args)
@staticmethod
def _as_settings(args):
libvirt_xml = args.LIBVIRT_XML
output_xml = args.output
profile = Profile.from_str(args.profile)
in_place = args.in_place
uri = args.uri
multithreaded_pinning = args.multithreaded_pinning
if in_place and not libvirt_xml:
raise ArgumentError(None, message="no LIBVIRT_XML specified")
return Settings(libvirt_xml=libvirt_xml,
output_xml=output_xml,
in_place=in_place,
profile=profile,
force_multithreaded_pinning=multithreaded_pinning,
connection_uri=uri)
| 39.848101 | 126 | 0.542567 | 3,038 | 0.965057 | 0 | 0 | 2,498 | 0.79352 | 0 | 0 | 664 | 0.210928 |
7db3b96495442c5054ba6d121c6a02f0d28e7612 | 8,323 | py | Python | lib_bgp_data/collectors/mrt/mrt_base/mrt_file.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
]
| 16 | 2018-09-24T05:10:03.000Z | 2021-11-29T19:18:59.000Z | lib_bgp_data/collectors/mrt/mrt_base/mrt_file.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
]
| 4 | 2019-10-09T18:54:17.000Z | 2021-03-05T14:02:50.000Z | lib_bgp_data/collectors/mrt/mrt_base/mrt_file.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
]
| 3 | 2018-09-17T17:35:18.000Z | 2020-03-24T16:03:31.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains class MRT_File.
The MRT_File class contains the functionality to load and parse
mrt files. This is done through a series of steps, detailed in README.
"""
__authors__ = ["Justin Furuness", "Matt Jaccino"]
__credits__ = ["Justin Furuness", "Matt Jaccino", "Cameron Morris"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "[email protected]"
__status__ = "Production"
import os
import logging
from .tables import MRT_Announcements_Table
from ....utils import utils
from ....utils.base_classes import File
class MRT_File(File):
"""Converts MRT files to CSVs and then inserts them into a database.
In depth explanation in README.
"""
__slots__ = []
def parse_file(self, bgpscanner=True):
"""Parses a downloaded file and inserts it into the database
if bgpscanner is set to True, bgpscanner is used to parser files
which is faster, but ignores malformed announcements. While
these malformed announcements are few and far between, bgpdump
does not ignore them and should be used for full data runs. For
testing however, bgpscanner is much faster and has almost all
data required. More in depth explanation at the top of the file
Note that when tested for speed, logging doesn't slow down parse_files
Or it does, and I just turned it off wrong.
"""
# Sets CSV path
self.csv_name = f"{self.csv_dir}/{os.path.basename(self.path)}.csv"
# Parses the MRT file into a csv file
self._convert_dump_to_csv(bgpscanner)
# Inserts the csv file into the MRT_Announcements Table
utils.csv_to_db(MRT_Announcements_Table, self.csv_name)
# Deletes all old files
utils.delete_paths([self.path, self.csv_name])
utils.incriment_bar(logging.root.level)
########################
### Helper Functions ###
########################
def _convert_dump_to_csv(self, bgpscanner=True):
"""Parses MRT file into a CSV
This function uses bgpscanner to first be able to read
the MRT file. This is because BGPScanner is the fastest tool to
use for this task. The drawback of bgpscanner is that it ignores
malformed announcements. There aren't a lot of these, and it's
much faster, but for a full data set the slower tool bgpdump
should be used. Then the sed commands parse the file and
format the data for a CSV. Then this is stored as a tab
delimited CSV file, and the original is deleted. For a more in
        depth explanation see top of file. For parsing specifics, see each
function listed below.
"""
args = self._bgpscanner_args() if bgpscanner else self._bgpdump_args()
# writes to a csv
args += '> ' + self.csv_name
utils.run_cmds(args)
logging.debug(f"Wrote {self.csv_name}\n\tFrom {self.url}")
utils.delete_paths(self.path)
def _bgpscanner_args(self):
"""Parses MRT file into a CSV using bgpscanner
For a more in depth explanation see _convert_dump_to_csv. For
explanation on specifics of the parsing, see below.
"""
        # I know this may seem unmaintainable, that's because this is a
        # fast way to do this. Please, calm down.
# Turns out not fast - idk if other regexes are faster
# bgpscanner outputs this format:
# TYPE|SUBNETS|AS_PATH|NEXT_HOP|ORIGIN|ATOMIC_AGGREGATE|
# AGGREGATOR|COMMUNITIES|SOURCE|TIMESTAMP|ASN 32 BIT
# Example: =|1.23.250.0/24|14061 6453 9498 45528 45528|
# 198.32.160.170|i|||
# 6453:50 6453:1000 6453:1100 6453:1113 14061:402 14061:2000
# 14061:2002 14061:4000 14061:4002|198.32.160.170 14061|
# 1545345848|1
# Also please note: sed needs escape characters, so if something
# is escaped once it is for sed. If it is escaped twice, it is
# to escape something in sed, and a second escape for the python
# Below are the things that need to be escaped:
# Parenthesis are escaped because they are sed capture groups
# + is escaped to get sed's special plus (at least one)
# . is escaped for sed to recognize it as a period to match
# / is escaped for sed to match the actual forward slash
# performs bgpdump on the file
bash_args = 'bgpscanner '
bash_args += self.path
# Cuts out columns we don't need
bash_args += ' | cut -d "|" -f1,2,3,10'
# Now we have TYPE|SUBNETS|AS_PATH|TIMESTAMP
# Ex: =|1.23.250.0/24|14061 6453 9498 45528 45528|1545345848
        # Makes sure it gets an announcement, withdrawal, or RIB entry
# -n for no output if nothing there
bash_args += ' | sed -n "s/[=|+|-]|'
# Now we focus on SUBNETS|AS_PATH|TIMESTAMP
# Ex: 1.23.250.0/24|14061 6453 9498 45528 45528|1545345848
# Gets three capture groups.
# The first capture group is the prefix
# Captures chars normally in IPV4 or IPV6 prefixes
bash_args += '\([0|1|2|3|4|5|6|7|8|9|%|\.|\:|a|b|c|d|e|f|/]\+\)|'
# I left this old code here in case someone can figure it out
# https://unix.stackexchange.com/questions/145402/
# It appears sed doesn't support this kind of alternation
# It appears you cannot perform alternation with char classes
# So while it is slower to use ^, that is the way it will run
# until someone can figure out a crazier sed command. And even
# if you could, it would appear that it wouldn't be cross
        # platform compatible, so it probably shouldn't be done anyways
# The regex for prefix is done in this way instead of non
# greedy matching because sed doesn't have non greedy matching
# so instead the | must be excluded which is slower than this
# bash_args += '\([[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
# bash_args += '\.[[:digit:]]\+\/[[:digit:]]\+|'
# Now we match for ipv6 prefixes
# bash_args += '[0|1|2|3|4|5|6|7|8|9|%|\:|\.|a|b|c|d|e|f]*]\)|'
# Now we focus on AS_PATH|TIMESTAMP
# Ex: 14061 6453 9498 45528 45528|1545345848
# Second capture group is as path except for the last number
bash_args += '\([^{]*[[:space:]]\)*'
# Now we have all but the last number
# Ex: 45528|1545345848
# Third capture group is the origin
bash_args += '\([^{]*\)'
# Now we have just the time
# Example: |1545345848
# Fourth capture group is the time
bash_args += '|\(.*\)'
# Replacement with the capture groups
# Must double escape here or python freaks out
bash_args += '/\\1\\t{\\2\\3}\\t\\3\\t\\4/p" | '
# Replaces spaces in array to commas
# Need to pipe to new sed because you need the -n -p args
# to make sed not output the full string if it doesn't match
# And you cannot add -e args after that
bash_args += 'sed -e "s/ /, /g" '
return bash_args
def _bgpdump_args(self):
"""Parses MRT file into a CSV using bgpdump
For a more in depth explanation see _convert_dump_to_csv. For
explanation on specifics of the parsing, see below. Also note,
you must use the updated bgpdump tool, not the apt repo.
"""
# performs bgpdump on the file
bash_args = 'bgpdump -q -m -t change '
bash_args += self.path
# Cuts out columns we don't need
bash_args += ' | cut -d "|" -f2,6,7 '
# Deletes any announcements with as sets
bash_args += '|sed -e "/{.*}/d" '
# Performs regex matching with sed and adds brackets to as_path
bash_args += '-e "s/\(.*|.*|\)\(.*$\)/\\1{\\2}/g" '
# Replaces pipes and spaces with commas for csv insertion
# leaves out first one: -e "s/, / /"
bash_args += '-e "s/ /, /g" -e "s/|/\t/g" '
# Adds a column for the origin
bash_args += '-e "s/\([[:digit:]]\+\)}/\\1}\t\\1/g"'
# Rearrange columns to match for csv_to_db
bash_args += '| awk \'BEGIN {FS="\t"};{OFS="\t"};{ print '
bash_args += '$2, $3, $4, $1}\''
return bash_args
| 42.464286 | 78 | 0.622252 | 7,720 | 0.92755 | 0 | 0 | 0 | 0 | 0 | 0 | 6,235 | 0.749129 |
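The docstrings above describe how a bgpscanner line is cut down to four tab-separated columns (prefix, brace-wrapped AS path, origin, timestamp) by the cut/sed pipeline. As a reading aid only, not part of the repository, here is a rough pure-Python equivalent of that transformation for a single well-formed line (AS sets are ignored, as in the sed version):

def parse_bgpscanner_line(line):
    # bgpscanner fields: TYPE|SUBNETS|AS_PATH|NEXT_HOP|ORIGIN|ATOMIC_AGGREGATE|
    #                    AGGREGATOR|COMMUNITIES|SOURCE|TIMESTAMP|ASN_32_BIT
    fields = line.split('|')
    prefix, as_path, timestamp = fields[1], fields[2], fields[9]
    hops = as_path.split()
    origin = hops[-1]
    # mirrors the sed replacement \1\t{\2\3}\t\3\t\4 followed by s/ /, /g
    return '\t'.join([prefix, '{' + ', '.join(hops) + '}', origin, timestamp])

line = ('=|1.23.250.0/24|14061 6453 9498 45528 45528|198.32.160.170|i|||'
        '6453:50 6453:1000|198.32.160.170 14061|1545345848|1')
print(parse_bgpscanner_line(line))

For the example line this prints the prefix, the comma-separated AS path in braces, the origin ASN 45528 and the timestamp, separated by tabs.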
7db66263d9d342b5a826306669c5a5214abeb3e7 | 1,611 | py | Python | demo.py | mhy12345/rcaudio | 90fcc3c2d2586905c7f35ea5c2ac6b2c2cf70029 | [
"MIT"
]
| 31 | 2018-09-27T03:35:06.000Z | 2022-01-11T09:49:26.000Z | demo.py | yeashen/rcaudio | 90fcc3c2d2586905c7f35ea5c2ac6b2c2cf70029 | [
"MIT"
]
| 3 | 2018-11-20T07:49:24.000Z | 2021-01-06T11:48:41.000Z | demo.py | yeashen/rcaudio | 90fcc3c2d2586905c7f35ea5c2ac6b2c2cf70029 | [
"MIT"
]
| 7 | 2019-04-23T06:32:23.000Z | 2020-09-25T14:18:32.000Z |
from rcaudio import *
import time
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
def demo1():
CR = CoreRecorder(
time = 10,
sr = 1000,
batch_num = 100,
frames_per_buffer = 100,
)
CR.start()
while True:
if not CR.buffer.empty():
x = CR.buffer.get()
print('*'*int(abs(x)))
def demo2():
SR = SimpleRecorder()
VA = VolumeAnalyzer(rec_time = 1)
SR.register(VA)
SR.start()
while True:
print("VOLUME : ",VA.get_volume())
time.sleep(1)
def demo3():
SR = SimpleRecorder(sr = 20000)
BA = BeatAnalyzer(rec_time = 15, initial_bpm = 120, smooth_ratio = .8)
SR.register(BA)
SR.start()
while True:
print(BA.block_until_next_beat())
def demo4():
SR = SimpleRecorder(sr = 20000)
BA = BeatAnalyzer(rec_time = 15, initial_bpm = 120, smooth_ratio = .8)
VA = VolumeAnalyzer(rec_time = 1)
SR.register(BA)
SR.register(VA)
SR.start()
low_volume_count = 0
while True:
v = VA.get_volume()
if v < 50:
low_volume_count += 1
if low_volume_count > 4:
break
SR.stop()
SR.join()
def demo5():
SR = SimpleRecorder(sr = 1000)
FA = FeatureAnalyzer(refresh_time = 1)
SR.register(FA)
SR.start()
cpos = 0
while True:
if len(FA.result) > cpos:
print("Zero Crossing Rate : ",FA.result[cpos])
cpos += 1
time.sleep(.01)
demo2()
| 22.690141 | 85 | 0.556797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.065798 |
7db6acccc13d73c452c9d80805e389c51f138158 | 346 | py | Python | Backend/linux.py | TheInvincibleLearner/simranquirky.github.io | 21a2524b321493b9ff82eb8b4fcc10af8f8face7 | [
"MIT"
]
| null | null | null | Backend/linux.py | TheInvincibleLearner/simranquirky.github.io | 21a2524b321493b9ff82eb8b4fcc10af8f8face7 | [
"MIT"
]
| 10 | 2021-09-29T13:25:21.000Z | 2021-10-05T13:51:36.000Z | Backend/linux.py | TheInvincibleLearner/simranquirky.github.io | 21a2524b321493b9ff82eb8b4fcc10af8f8face7 | [
"MIT"
]
| 7 | 2021-09-22T13:26:35.000Z | 2021-10-05T03:07:43.000Z |
#!/usr/bin/python3
print("content-type: text/html")
print()
import subprocess as sp
import cgi
fs = cgi.FieldStorage()
cmd = fs.getvalue("command")
output = sp.getoutput("sudo "+cmd)
print("<body style='padding: 40px;'>")
print('<h1 style="color:#df405a;" >Output</h1>')
print("<pre>{}</pre>".format(output))
print("</body>")
| 20.352941 | 49 | 0.635838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.450867 |
7db6de9a9058b5930c41c0b6f46d74899e0a554e | 1,334 | py | Python | tests/test_swagger_registry.py | niall-byrne/flask-restful-swagger | 4ce4284627f27e1e8d58ff922abbefe9e7fd8c21 | [
"MIT"
]
| 667 | 2015-01-05T02:11:09.000Z | 2022-03-30T02:01:04.000Z | tests/test_swagger_registry.py | Deepstatsanalysis/flask-restful-swagger | 4ce4284627f27e1e8d58ff922abbefe9e7fd8c21 | [
"MIT"
]
| 83 | 2015-01-05T19:39:23.000Z | 2021-11-22T16:39:52.000Z | tests/test_swagger_registry.py | Deepstatsanalysis/flask-restful-swagger | 4ce4284627f27e1e8d58ff922abbefe9e7fd8c21 | [
"MIT"
]
| 184 | 2015-01-05T19:20:23.000Z | 2022-03-21T10:32:34.000Z |
from flask import Flask
from flask_restful_swagger.swagger import SwaggerRegistry
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@patch("flask_restful_swagger.swagger._get_current_registry")
@patch("flask_restful_swagger.swagger.render_homepage")
def test_get_swagger_registry(homepage, registry):
mock_registry = {
"apiVersion": "mock_version",
"swaggerVersion": "mock_swagger_version",
"basePath": "mock_path",
"spec_endpoint_path": "mock_spec_endpoint_path",
"description": "mock_description",
}
registry.return_value = mock_registry
app = Flask(__name__)
resource = SwaggerRegistry()
bases = [base.__name__ for base in SwaggerRegistry.__mro__]
assert sorted(bases) == [
"MethodView",
"Resource",
"SwaggerRegistry",
"View",
"object",
]
with app.test_request_context(path="/some_path.html"):
_ = resource.get()
assert homepage.called
homepage.assert_called_once_with(
"mock_pathmock_spec_endpoint_path/_/resource_list.json"
)
with app.test_request_context(path="/some_path"):
homepage.reset_mock()
response = resource.get()
assert not homepage.called
assert response == mock_registry
| 26.68 | 67 | 0.676912 | 0 | 0 | 0 | 0 | 1,159 | 0.868816 | 0 | 0 | 398 | 0.298351 |
7db77614b73b30faa1f8658a19a8d335313caf9b | 1,921 | py | Python | gcp_census/bigquery/bigquery_handler.py | ocadotechnology/gcp-census | 6ce7c55a798efd83c07b9677081d26bb6113e2ed | [
"Apache-2.0"
]
| 40 | 2017-05-18T12:39:11.000Z | 2021-12-02T11:24:18.000Z | gcp_census/bigquery/bigquery_handler.py | ocadotechnology/gcp-census | 6ce7c55a798efd83c07b9677081d26bb6113e2ed | [
"Apache-2.0"
]
| 35 | 2017-05-18T12:41:36.000Z | 2019-11-15T10:06:19.000Z | gcp_census/bigquery/bigquery_handler.py | ocadotechnology/gcp-census | 6ce7c55a798efd83c07b9677081d26bb6113e2ed | [
"Apache-2.0"
]
| 7 | 2018-01-18T12:39:51.000Z | 2018-08-16T09:17:00.000Z |
import logging
import webapp2
from googleapiclient.errors import HttpError
from gcp_census.bigquery.bigquery_client import BigQuery
from gcp_census.bigquery.bigquery_task import BigQueryTask
class BigQueryBaseClass(webapp2.RequestHandler):
def __init__(self, request=None, response=None):
super(BigQueryBaseClass, self).__init__(request, response)
self.bigquery = BigQuery()
self.bigquery_task = BigQueryTask(self.bigquery)
def handle_exception(self, exception, debug): # nopep8 pylint: disable=W0613
logging.exception(exception)
if isinstance(exception, HttpError):
if exception.resp.status == 404:
logging.info("Received 404 error code, task won't be retried")
self.response.set_status(200)
else:
self.response.set_status(exception.resp.status)
else:
self.response.set_status(500)
class BigQueryMainHandler(BigQueryBaseClass):
def get(self):
self.bigquery_task.schedule_task_for_each_project()
self.response.write("BigQuery process started. "
"Check the console for task progress.")
class BigQueryProjectHandler(BigQueryBaseClass):
def get(self, project_id):
self.bigquery_task.schedule_task_for_each_dataset(project_id)
class BigQueryDatasetHandler(BigQueryBaseClass):
def get(self, project_id, dataset_id):
page_token = self.request.get('pageToken', None)
self.bigquery_task.schedule_task_for_each_table(project_id,
dataset_id,
page_token)
class BigQueryTableHandler(BigQueryBaseClass):
def get(self, project_id, dataset_id, table_id):
self.bigquery_task.stream_table_metadata(project_id, dataset_id,
table_id)
| 36.942308 | 80 | 0.660073 | 1,713 | 0.891723 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.080687 |
7db8db74363fb05b1c46621fca683280e13e4190 | 67 | py | Python | Solutions/Python/Posix command(7 kyu).py | collenirwin/Codewars-Solutions | 14bad3878d3fc37c7e73cbaaaa24cd28f759ce3b | [
"MIT"
]
| null | null | null | Solutions/Python/Posix command(7 kyu).py | collenirwin/Codewars-Solutions | 14bad3878d3fc37c7e73cbaaaa24cd28f759ce3b | [
"MIT"
]
| null | null | null | Solutions/Python/Posix command(7 kyu).py | collenirwin/Codewars-Solutions | 14bad3878d3fc37c7e73cbaaaa24cd28f759ce3b | [
"MIT"
]
| null | null | null |
from os import popen
def get_output(s):
    return popen(s).read()
| 16.75 | 26 | 0.701493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
7db9356e6b7de8c8a7ac0cabb607897d76784d53 | 3,056 | py | Python | resources/src/gcp_iam_service_account.py | kfirz/deployster | b95fdb9cf150eee765f7ef3dbdee3666119e76f9 | [
"Apache-2.0"
]
| null | null | null | resources/src/gcp_iam_service_account.py | kfirz/deployster | b95fdb9cf150eee765f7ef3dbdee3666119e76f9 | [
"Apache-2.0"
]
| 19 | 2017-12-28T19:39:37.000Z | 2018-04-18T23:24:45.000Z | resources/src/gcp_iam_service_account.py | kfirz/deployster | b95fdb9cf150eee765f7ef3dbdee3666119e76f9 | [
"Apache-2.0"
]
| 1 | 2018-04-06T16:50:49.000Z | 2018-04-06T16:50:49.000Z |
#!/usr/bin/env python3.6
import argparse
import json
import sys
from typing import Sequence, MutableSequence
from dresources import DAction, action
from external_services import ExternalServices
from gcp import GcpResource
class GcpIamServiceAccount(GcpResource):
def __init__(self, data: dict, svc: ExternalServices = ExternalServices()) -> None:
super().__init__(data=data, svc=svc)
self.config_schema.update({
"type": "object",
"required": ["project_id", "email"],
"additionalProperties": False,
"properties": {
"project_id": {"type": "string", "pattern": "^[a-z]([-a-z0-9]*[a-z0-9])$"},
"email": {"type": "string"},
"display_name": {"type": "string", "minLength": 1}
}
})
def discover_state(self):
project_id = self.info.config['project_id']
sa_email = self.info.config["email"]
return self.svc.find_service_account(project_id=project_id, email=sa_email)
def get_actions_for_missing_state(self) -> Sequence[DAction]:
sa_email = self.info.config["email"]
return [DAction(name=f"create-service-account", description=f"Create service account '{sa_email}'")]
def get_actions_for_discovered_state(self, state: dict) -> Sequence[DAction]:
actions: MutableSequence[DAction] = []
if 'display_name' in self.info.config and self.info.config['display_name'] != state['displayName']:
sa_email = self.info.config["email"]
actions.append(DAction(name=f"update-display-name",
description=f"Update display name of service account '{sa_email}'",
args=["update_display_name", state['etag']]))
return actions
def configure_action_argument_parser(self, action: str, argparser: argparse.ArgumentParser):
super().configure_action_argument_parser(action, argparser)
if action == 'update_display_name':
argparser.add_argument('etag',
type=str,
metavar='ETAG',
help="current ETag of the resource, for safe updates")
@action
def create_service_account(self, args):
if args: pass
self.svc.create_service_account(
project_id=self.info.config['project_id'],
email=self.info.config["email"],
display_name=self.info.config['display_name'] if 'display_name' in self.info.config else None)
@action
def update_display_name(self, args):
self.svc.update_service_account_display_name(
project_id=self.info.config['project_id'],
email=self.info.config["email"],
display_name=self.info.config['display_name'] if 'display_name' in self.info.config else None,
etag=args.etag)
def main():
GcpIamServiceAccount(json.loads(sys.stdin.read())).execute() # pragma: no cover
if __name__ == "__main__":
main()
| 39.688312 | 108 | 0.620419 | 2,689 | 0.879908 | 0 | 0 | 658 | 0.215314 | 0 | 0 | 668 | 0.218586 |
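For reference, here is a config dict that satisfies the JSON schema declared in __init__ above (all values are invented for illustration):

example_config = {
    'project_id': 'my-sample-project',  # must match ^[a-z]([-a-z0-9]*[a-z0-9])$
    'email': 'deployer@my-sample-project.iam.gserviceaccount.com',
    'display_name': 'Deployment service account',  # optional, minLength 1
}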
7dbac9eb3255daf0f5135503edb3305af6da290c | 806 | py | Python | tests/consumtodb_test.py | thomas-for-aiven/monitor | bd712fed77c3b3cea3e1dd0f99318043ff3dc166 | [
"MIT"
]
| null | null | null | tests/consumtodb_test.py | thomas-for-aiven/monitor | bd712fed77c3b3cea3e1dd0f99318043ff3dc166 | [
"MIT"
]
| null | null | null | tests/consumtodb_test.py | thomas-for-aiven/monitor | bd712fed77c3b3cea3e1dd0f99318043ff3dc166 | [
"MIT"
]
| null | null | null |
#!/usr/bin/python3
import pytest
import monitor.monitorshared as m
import monitor.consumtodb as con
def test_db_connection(tmpdir):
"test postgres connection"
conf = m.Configuration('configx.ini', "test")
# in case the field is empty
if conf.db_host == '':
pytest.skip("no broker configured in config.ini")
db_handle = con.connect_db(conf)
# function will fail if cannot connect
assert db_handle
def test_kafka_connection(tmpdir):
# we do the real config here
conf = m.Configuration('configx.ini', "test")
# in case the field is empty
if conf.kafka_broker == '':
pytest.skip("no broker configured in config.ini")
kafka_handle = con.connect_kafka(conf, 'TESTCON')
# function will fail if cannot connect
assert kafka_handle
| 23.028571 | 57 | 0.691067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.405707 |
7dbc7331779b26c50f838cb805bfffb5e23cfa30 | 542 | py | Python | pytorch3dunet/unet3d/config.py | VolkerH/pytorch-3dunet | 01ee7d53ef1c8edb2bd45d76faf7df447144fb67 | [
"MIT"
]
| null | null | null | pytorch3dunet/unet3d/config.py | VolkerH/pytorch-3dunet | 01ee7d53ef1c8edb2bd45d76faf7df447144fb67 | [
"MIT"
]
| null | null | null | pytorch3dunet/unet3d/config.py | VolkerH/pytorch-3dunet | 01ee7d53ef1c8edb2bd45d76faf7df447144fb67 | [
"MIT"
]
| null | null | null |
import argparse
import torch
import yaml
def load_config():
parser = argparse.ArgumentParser(description='UNet3D training')
parser.add_argument('--config', type=str, help='Path to the YAML config file', required=True)
args = parser.parse_args()
config = _load_config_yaml(args.config)
# Get a device to train on
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
config['device'] = device
return config
def _load_config_yaml(config_file):
    # safe_load with a context manager; yaml.load without an explicit Loader is deprecated
    with open(config_file, 'r') as f:
        return yaml.safe_load(f)
| 27.1 | 97 | 0.714022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.197417 |
7dbdd82b44af43747c667e7503af84473c437bc0 | 4,630 | py | Python | Code/chatbot.py | pavithra-b-reddy/Chatbot-CS310 | 0cae72c974272d00ee5db3c980f48c0dbfa16e2b | [
"MIT"
]
| null | null | null | Code/chatbot.py | pavithra-b-reddy/Chatbot-CS310 | 0cae72c974272d00ee5db3c980f48c0dbfa16e2b | [
"MIT"
]
| null | null | null | Code/chatbot.py | pavithra-b-reddy/Chatbot-CS310 | 0cae72c974272d00ee5db3c980f48c0dbfa16e2b | [
"MIT"
]
| null | null | null | # This codes are referenced from the Github repo (https://github.com/parulnith/Building-a-Simple-Chatbot-in-Python-using-NLTK/blob/master/chatbot.py)
# Loading the required packages
import nltk
import random
import string
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from textblob import TextBlob
# Setup
warnings.filterwarnings('ignore') # Ignore warning messages
f = open('corpus_linguistics.txt', 'r') # opening the corpus
text = f.read() # reading the corpus
# Convert all text from corpus to lower case
text = text.lower()
# Perform tokenization
sent_tokens = nltk.sent_tokenize(text)
word_tokens = nltk.word_tokenize(text)
# Initialize set of greetings and responses
user_greetings = ["hi", "hello", "good morning", "hey", "what's up"]
bot_greetings = ["Hello, how may I be of assistance?"]
user_gratitude = ["thank you", "thanks", "that was helpful"]
bot_gratitude = ["You're welcome! Is there anything else you need?",
"Happy to help! Are there other questions that I could help "
"with?"]
bot_exit_text = ["Thank you for using my services. Have a great day!",
"Hope I was helpful. See you later :)", "Bye!"]
languages = {"en": "English", "fr": "French", "es": "Spanish",
"la": "Latin"}
# Text Preprocessing
lemmatizer = nltk.stem.WordNetLemmatizer() # Text Lemmatization
# Function to perform lemmatization
def LemTokens(tokens):
return [lemmatizer.lemmatize(token) for token in tokens]
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
# Function to perform normalization
def LemNormalize(text):
return LemTokens(
nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
# Generating response
def respond(input_text):
bot_message = ""
sent_tokens.append(input_text)
TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english') # TF-IDF approach
tfidf = TfidfVec.fit_transform(sent_tokens)
vals = cosine_similarity(tfidf[-1], tfidf)
idx = vals.argsort()[0][-2]
flat = vals.flatten()
flat.sort()
req_tfidf = flat[-2]
if req_tfidf == 0:
bot_message += "Apologies, I cannot understand your question. Please " \
"rephrase your question and try again. "
else:
bot_message += sent_tokens[idx]
return bot_message
# Perform sentiment analysis
def extract_sentiment(text):
processed_text = TextBlob(text) # Here, we use the textblob module to implement sentiment analysis
sentiment = processed_text.sentiment
if sentiment.polarity < 0: # we manually set the rule for testing the mood of a sentence
return "negative"
elif sentiment.polarity > 0:
return "positive"
else:
return "neutral"
# Language detection
def get_language(text):
processed_text = TextBlob(text)
return processed_text.detect_language()
# Interact with chatbot framework based on input from user
def bot(choice, input_text):
exit_status = False
while exit_status is False:
input_text = input_text.lower() # lowercase the input
if input_text != 'bye':
if choice == "1":
if input_text in user_greetings: # Generate random response from the greetings set
return random.choice(bot_greetings)
else:
if input_text in user_gratitude: # Generate random response from the gratitude set
return random.choice(bot_gratitude)
else:
return respond(input_text) # Generate a response using NLTK that answers the user's question
sent_tokens.remove(input_text)
elif choice == "2":
return_string = "Detected Language: " + languages[
get_language(input_text)] + "\n" # Language detection
if get_language(input_text) == "en":
return_string += "Detected Sentiment: " + extract_sentiment(
input_text) # Sentiment analysis
else:
return_string += "Sentiment can only be detected for " \
"text in English "
return return_string
else:
exit_status = True
return "Invalid choice!\nOnly 1 and 2 are valid choices " \
"\nPlease try running the program again. "
else:
exit_status = True
return random.choice(bot_exit_text)
| 35.615385 | 149 | 0.649028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,722 | 0.371922 |
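The respond() function above is a standard TF-IDF retrieval step: the user's question is appended to the sentence list, everything is vectorized, and the corpus sentence with the highest cosine similarity to the question is returned. A self-contained sketch of that idea on a toy corpus (not the chatbot's actual corpus file):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

corpus = ["phonetics studies the sounds of speech",
          "syntax studies sentence structure",
          "semantics studies meaning in language"]
query = "what is syntax about?"
tfidf = TfidfVectorizer(stop_words='english').fit_transform(corpus + [query])  # last row is the query
sims = cosine_similarity(tfidf[-1], tfidf[:-1]).flatten()
print(corpus[sims.argmax()] if sims.max() > 0 else "no match")  # prints the syntax sentence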
7dbe53a8adce10ce3864cc27ec6021037f99abf0 | 2,714 | py | Python | relo/core/log.py | cwoebker/relo | db11dea794b4b241578f8de4f11d9dbbbbabf894 | [
"BSD-3-Clause"
]
| null | null | null | relo/core/log.py | cwoebker/relo | db11dea794b4b241578f8de4f11d9dbbbbabf894 | [
"BSD-3-Clause"
]
| null | null | null | relo/core/log.py | cwoebker/relo | db11dea794b4b241578f8de4f11d9dbbbbabf894 | [
"BSD-3-Clause"
]
| null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import sys
LEVEL = {
'NORMAL': 0,
'INFO': 1,
'DEBUG': 2,
    'CRITICAL': 0,
'ERROR': 0,
'EXCEPTION': 0,
}
class Color(object):
ESCAPE = '\033[%sm'
BOLD = '1;%s'
UNDERLINE = '4;%s'
BLUE_ARROW = ESCAPE % (BOLD % '34') # Blue Bold
DEBUG = ESCAPE % (BOLD % '35') # Magenta Bold
HEAD = ESCAPE % (BOLD % '1') # Bold White (Standard Color)
INFO = ESCAPE % '32' # Green Normal
WARNING = ESCAPE % '33' # Yellow Normal
ERROR = ESCAPE % '31' # Red Normal
CRITICAL = ESCAPE % (UNDERLINE % '31') # Red Underline
# SPECIAL
ITEM = ESCAPE % (BOLD % '37') # Black Bold/Bright
SUBITEM = ESCAPE % '37' # White Normal
ENDC = ESCAPE % '0'
@classmethod
def _deco(cls, msg, color):
return '%s%s%s' % (color, msg, cls.ENDC)
@classmethod
def blueArrow(cls, msg):
return cls._deco(msg, cls.BLUE_ARROW)
@classmethod
def head(cls, msg):
return cls._deco(msg, cls.HEAD)
@classmethod
def debug(cls, msg):
return cls._deco(msg, cls.DEBUG)
@classmethod
def info(cls, msg):
return cls._deco(msg, cls.INFO)
@classmethod
def warning(cls, msg):
        return cls._deco(msg, cls.WARNING)
@classmethod
def error(cls, msg):
return cls._deco(msg, cls.ERROR)
@classmethod
def critical(cls, msg):
return cls._deco(msg, cls.CRITICAL)
@classmethod
def item(cls, msg):
return cls._deco(msg, cls.ITEM)
@classmethod
def subitem(cls, msg):
return cls._deco(msg, cls.SUBITEM)
class Logger(object):
def __init__(self):
self.level = 0
def debug(self, msg):
if self.level >= LEVEL['DEBUG']:
self._stdout(Color.debug("DEBUG: ") + "%s\n" % msg)
def head(self, msg):
self._stdout(Color.blueArrow('=> ') + Color.head("%s\n") % msg)
def log(self, msg):
self._stdout("%s\n" % msg)
def info(self, msg):
if self.level >= LEVEL['INFO']:
self._stdout(Color.info("INFO: ") + "%s\n" % msg)
def warning(self, msg):
self._stdout(Color.warning("WARNING: ") + "%s\n" % msg)
def error(self, msg):
self._stderr(Color.error("ERROR: ") + "%s\n" % msg)
def critical(self, msg):
self._stderr(Color.critical("CRITICAL: ") + "%s\n" % msg)
def item(self, msg):
self._stdout(Color.item(" - %s\n" % msg))
def subitem(self, msg):
self._stdout(Color.subitem(" @ %s\n" % msg))
def _stdout(self, msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _stderr(self, msg):
sys.stderr.write(msg)
sys.stderr.flush()
logger = Logger()
| 26.096154 | 71 | 0.562638 | 2,524 | 0.929993 | 0 | 0 | 811 | 0.298821 | 0 | 0 | 436 | 0.160648 |
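A short usage sketch of the module above (logger and LEVEL are both defined in the file; raising the level enables the info and debug channels):

logger.level = LEVEL['DEBUG']
logger.head('Indexing directory')
logger.item('notes.txt')
logger.subitem('2.3 KB')
logger.info('4 files scanned')
logger.debug('hash computed in 0.2 ms')
logger.warning('skipping unreadable file')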
7dbeb142bc5611ae233fb17f68720f678cc9d5f9 | 2,031 | py | Python | client/src/proto3/socket_server.py | andrhahn/pi-spy | 04013565c83eb20db85688c0abb23d6f83d3fbaa | [
"MIT"
]
| 1 | 2020-08-17T18:32:06.000Z | 2020-08-17T18:32:06.000Z | client/src/proto3/socket_server.py | andrhahn/pi-spy | 04013565c83eb20db85688c0abb23d6f83d3fbaa | [
"MIT"
]
| null | null | null | client/src/proto3/socket_server.py | andrhahn/pi-spy | 04013565c83eb20db85688c0abb23d6f83d3fbaa | [
"MIT"
]
| null | null | null |
import SocketServer
import io
import logging
import struct
import threading
import PIL.Image
import pika
import config
logging.basicConfig(level=logging.INFO)
class RequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
print 'Process socket connections thread:', threading.current_thread().name
try:
mf = self.request.makefile('rb')
while True:
image_len = struct.unpack('<L', mf.read(struct.calcsize('<L')))[0]
image_bytes = mf.read(image_len)
if not image_len:
break
image_stream = io.BytesIO()
image_stream.write(image_bytes)
image_stream.seek(0)
image = PIL.Image.open(image_stream)
image.verify()
print 'Image verified.'
queue_channel = queue_connection.channel()
queue_channel.exchange_declare(exchange='images', exchange_type='fanout')
queue_channel.basic_publish(exchange='images', routing_key='', body=image_bytes)
print 'Sent image.'
finally:
print 'Disconnected with client'
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
print 'Connecting to queue server'
queue_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=config.get('queue_server_host'), port=int(config.get('queue_server_port'))))
socket_server_port = int(config.get('socket_server_port'))
print 'Starting socket server on port ', socket_server_port
socket_server = ThreadedTCPServer((config.get('socket_server_host'), socket_server_port), RequestHandler)
try:
socket_server.serve_forever()
except KeyboardInterrupt:
pass
print 'Closing queue connection'
queue_connection.close()
print 'Stopping socket server'
socket_server.shutdown()
socket_server.server_close()
| 24.46988 | 115 | 0.652388 | 1,132 | 0.557361 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.161989 |
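The handler above expects each image to arrive as a 4-byte little-endian length prefix followed by that many bytes, with a zero length ending the read loop. A minimal client-side sender consistent with that framing; host, port and image are placeholders, not taken from the repository:

import io
import socket
import struct
import PIL.Image

sock = socket.socket()
sock.connect(('camera-server.local', 8000))  # assumed host/port
wfile = sock.makefile('wb')

image = PIL.Image.new('RGB', (64, 64), 'red')
buf = io.BytesIO()
image.save(buf, format='JPEG')
payload = buf.getvalue()

wfile.write(struct.pack('<L', len(payload)))  # length prefix, as the server unpacks with '<L'
wfile.write(payload)
wfile.write(struct.pack('<L', 0))             # zero length tells the server to stop reading
wfile.flush()
wfile.close()
sock.close()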
7dbec2776e2389304afbc67d66e86856becf8f17 | 537 | py | Python | lib/showFaces.py | ZakDoesGaming/OregonTrail | 90cab35536ac5c6ba9e772ac5c29c914017c9c23 | [
"MIT"
]
| 6 | 2018-05-07T04:04:58.000Z | 2021-05-15T17:44:16.000Z | lib/showFaces.py | ZakDoesGaming/OregonTrail | 90cab35536ac5c6ba9e772ac5c29c914017c9c23 | [
"MIT"
]
| null | null | null | lib/showFaces.py | ZakDoesGaming/OregonTrail | 90cab35536ac5c6ba9e772ac5c29c914017c9c23 | [
"MIT"
]
| 2 | 2017-05-27T17:06:23.000Z | 2020-08-26T17:57:10.000Z |
from pygame import image
class ShowFaces():
def __init__(self, filePath, colour = (0, 0, 0), posX = 0, posY = 100, resourcePath = ""):
self.filePath = filePath
self.colour = colour
self.posX = posX
self.posY = posY
self.resourcePath = resourcePath
self.image = image.load(self.resourcePath + "img/faces/" + self.filePath + ".png")
self.faceRect = self.image.get_rect()
def update(self):
		self.faceRect.centerx = self.posX + self.image.get_width() / 2  # pygame Rect attributes are lowercase: centerx/centery
		self.faceRect.centery = self.posY + self.image.get_height() / 2
| 35.8 | 91 | 0.690875 | 511 | 0.951583 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.037244 |
7dbf4c0c61fb56b588d550f32b9ba42ac0a71e93 | 3,506 | py | Python | Thirdparty/libpsd/build.py | stinvi/dava.engine | 2b396ca49cdf10cdc98ad8a9ffcf7768a05e285e | [
"BSD-3-Clause"
]
| 26 | 2018-09-03T08:48:22.000Z | 2022-02-14T05:14:50.000Z | Thirdparty/libpsd/build.py | ANHELL-blitz/dava.engine | ed83624326f000866e29166c7f4cccfed1bb41d4 | [
"BSD-3-Clause"
]
| null | null | null | Thirdparty/libpsd/build.py | ANHELL-blitz/dava.engine | ed83624326f000866e29166c7f4cccfed1bb41d4 | [
"BSD-3-Clause"
]
| 45 | 2018-05-11T06:47:17.000Z | 2022-02-03T11:30:55.000Z |
import os
import shutil
import build_utils
def get_supported_targets(platform):
if platform == 'win32':
return ['win32']
elif platform == 'darwin':
return ['macos']
elif platform == 'linux':
return ['linux']
else:
return []
def get_dependencies_for_target(target):
if target == 'win32':
return ['zlib']
else:
return []
def build_for_target(target, working_directory_path, root_project_path):
if target == 'win32':
_build_win32(working_directory_path, root_project_path)
elif target == 'macos':
_build_macos(working_directory_path, root_project_path)
elif target == 'linux':
_build_linux(working_directory_path, root_project_path)
def get_download_info():
return 'https://sourceforge.net/projects/libpsd/files/libpsd/0.9/libpsd-0.9.zip'
def _download_and_extract(working_directory_path):
source_folder_path = os.path.join(working_directory_path, 'libpsd_source')
url = get_download_info()
build_utils.download_and_extract(
url,
working_directory_path,
source_folder_path,
build_utils.get_url_file_name_no_ext(url))
return source_folder_path
@build_utils.run_once
def _patch_sources(source_folder_path, working_directory_path):
build_utils.apply_patch(
os.path.abspath('patch_v0.9.diff'), working_directory_path)
shutil.copyfile(
'CMakeLists.txt', os.path.join(source_folder_path, 'CMakeLists.txt'))
def _build_win32(working_directory_path, root_project_path):
source_folder_path = _download_and_extract(working_directory_path)
_patch_sources(source_folder_path, working_directory_path)
cmake_flags = ['-DZLIB_INCLUDE_DIR=' + os.path.join(working_directory_path, '../zlib/zlib_source/')]
build_utils.build_and_copy_libraries_win32_cmake(
os.path.join(working_directory_path, 'gen'),
source_folder_path,
root_project_path,
'psd.sln', 'psd',
'psd.lib', 'psd.lib',
'libpsd.lib', 'libpsd.lib',
'libpsd.lib', 'libpsd.lib',
cmake_flags,
static_runtime=False)
_copy_headers(source_folder_path, root_project_path)
def _build_macos(working_directory_path, root_project_path):
source_folder_path = _download_and_extract(working_directory_path)
_patch_sources(source_folder_path, working_directory_path)
build_utils.build_and_copy_libraries_macos_cmake(
os.path.join(working_directory_path, 'gen'),
source_folder_path,
root_project_path,
'psd.xcodeproj', 'psd',
'libpsd.a',
'libpsd.a')
_copy_headers(source_folder_path, root_project_path)
def _build_linux(working_directory_path, root_project_path):
source_folder_path = _download_and_extract(working_directory_path)
_patch_sources(source_folder_path, working_directory_path)
build_utils.build_and_copy_libraries_linux_cmake(
gen_folder_path=os.path.join(working_directory_path, 'gen'),
source_folder_path=source_folder_path,
root_project_path=root_project_path,
target="all",
lib_name='libpsd.a')
_copy_headers(source_folder_path, root_project_path)
def _copy_headers(source_folder_path, root_project_path):
include_path = os.path.join(root_project_path, 'Libs/include/libpsd')
build_utils.copy_files_by_name(
os.path.join(source_folder_path, 'include'),
include_path,
['libpsd.h', 'psd_color.h', 'psd_types.h'])
| 31.585586 | 104 | 0.72162 | 0 | 0 | 0 | 0 | 281 | 0.080148 | 0 | 0 | 473 | 0.134912 |
7dc01542f166fdf824058cb9a8b3de627c4cc58f | 53 | py | Python | molpal/__init__.py | mchaker/lab-molpal | f4db7ee2ca51515b4246604867a93a3aac08107d | [
"MIT"
]
| 1 | 2022-03-27T10:17:25.000Z | 2022-03-27T10:17:25.000Z | molpal/__init__.py | mchaker/lab-molpal | f4db7ee2ca51515b4246604867a93a3aac08107d | [
"MIT"
]
| 2 | 2022-03-27T20:08:23.000Z | 2022-03-28T11:47:11.000Z | molpal/__init__.py | mchaker/lab-molpal | f4db7ee2ca51515b4246604867a93a3aac08107d | [
"MIT"
]
| 1 | 2022-03-27T20:20:01.000Z | 2022-03-27T20:20:01.000Z |
from .explorer import Explorer
__version__ = "1.0.2"
| 17.666667 | 30 | 0.754717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.132075 |
7dc06bfcfd35ab80fe1f5fe2ede0d1828b1336ca | 6,192 | py | Python | Data.py | praenubilus/lc-tool | 6af4c557d2780758a4d53cd70554e16b70809859 | [
"MIT"
]
| null | null | null | Data.py | praenubilus/lc-tool | 6af4c557d2780758a4d53cd70554e16b70809859 | [
"MIT"
]
| null | null | null | Data.py | praenubilus/lc-tool | 6af4c557d2780758a4d53cd70554e16b70809859 | [
"MIT"
]
| null | null | null |
import subprocess
import os.path
import json
import time
import urllib.parse
from typing import Any, Tuple
import config
from requests_html import HTMLSession
from markdownify import markdownify
class Data:
def __init__(
self,
data_file_path: str = config.DATA_FILE_PATH,
preload: bool = False,
fetch_rule: bool = True,
) -> None:
super().__init__()
self.data_file_path = data_file_path
data = None
if preload:
data = self.load() # load from existing data file
if ( # check whether the data file is valid
not data
or abs(time.time() - data["timestamp"]) / 60
> config.DATA_RENEW_THRESHOLD_IN_MIN
):
data = self.fetch()
self.data = data
def load(self, path: str = None) -> Any:
data = None
if not path:
path = self.data_file_path
if os.path.exists(path):
with open(path, "r") as fp:
data_ser = json.load(fp)
data = json.loads(data_ser)
self.data = data
return data
def fetch(self, url: str = config.DATA_API_URL) -> Any:
        # fetch data
print("\n-------------Start fetching data-------------")
r = subprocess.check_output(f"curl {url}", shell=True)
print("\n-------------Finish fetching data-------------")
print("\n-------------Start serializing data-------------")
json_data = json.loads(r.decode("utf-8"))
# indexing based on question frontend id
data = {}
for q in json_data["stat_status_pairs"]:
qid = q["stat"]["frontend_question_id"]
if qid in data:
raise RuntimeError(f"question #{qid} already exists, duplicate!")
else:
data[str(qid).zfill(config.QID_PADDING_SIZE)] = q
print(f"Total feteched questions: {len(data)} ")
data["timestamp"] = time.time()
print("\n-------------Finish serializing data-------------")
return data
def do_persistence(
self, data_serialized: str = None, path=config.DATA_FILE_PATH
) -> None:
print("\n-------------Start data persistence-------------")
if not data_serialized:
data_serialized = json.dumps(self.data)
if not data_serialized or not path:
raise RuntimeError("invalid input data or file path.")
with open(path, "w") as fp:
json.dump(data_serialized, fp)
print("\n-------------Finish data persistence-------------")
class Problem:
def __init__(self, qid: int, blob: Any, auto_parse=False) -> None:
super().__init__()
self.qid = str(qid).zfill(config.QID_PADDING_SIZE)
self.difficulty = blob["difficulty"]["level"]
self.is_paid = blob["paid_only"]
self.stat = blob["stat"]
if auto_parse:
self.parse(self.stat)
def parse(self, stat=None):
self.total_acs, self.total_submitted, self.ac_rate = self._parse_statistics(
stat
)
self.title = self._parse_title(stat)
self.title_slug = self._parse_title_slug(stat)
self.url = self._parse_url(stat)
self.url_solution = self._parse_url_solution(stat)
def _parse_statistics(self, stat) -> Tuple[int, int]:
acs, submissions = stat["total_acs"], stat["total_submitted"]
return acs, submissions, acs / submissions if submissions > 0 else 0
def _parse_title(self, stat):
return stat["question__title"]
def _parse_title_slug(self, stat):
return stat["question__title_slug"]
def _parse_url(self, stat):
title_slug = self._parse_title_slug(stat)
return urllib.parse.urljoin(config.PROBLEM_URL_PREFIX, title_slug)
def _parse_url_solution(self, stat):
# be careful about the urljoin behavior: base abs url + part only(will swap if exists)
return (
urllib.parse.urljoin(
config.PROBLEM_URL_PREFIX, stat["question__article__slug"] + "/solution"
)
if stat["question__article__slug"]
else None
)
def _scrape_n_render(self, url=None):
if not url:
url = self.url
response = HTMLSession().get(url)
response.html.render()
return response
def scrape(self, url=None):
r = self._scrape_n_render(url=url)
# self.content, self.contetnt_md = self._scrape_problem_content(r.html)
# with open("html-content.html", "w") as f:
# f.write(r.html.html)
# with open("html-raw-content.html", "w") as f:
# f.write(r.html.raw_html.decode("utf-8"))
self.tags = self._scrape_problem_topics(r.html)
self.companies = self._scrape_problem_companies(r.html)
def _scrape_problem_topics(self, html):
t_elements = html.xpath("//a[starts-with(@class,'topic-tag')]/span")
return [t.text for t in t_elements]
def _scrape_problem_companies(self, html):
# companies tags are only available to paid user.
# TODO: add login and cookies support
t_elements = html.xpath("//a[starts-with(@href,'/company')]")
return [t.text for t in t_elements]
def _scrape_problem_content(self, html):
content = html.xpath("//div[contains(@class,'question-content')]/div")[0]
markdown_content = markdownify(self.html_preprocess(content.html))
# with open("test.md", "w") as fp:
# fp.write(md_out)
return content, markdown_content
def html2markdown_preprocess(self, html: str) -> str:
# replace all <code>,</code> to inline markdown code: `backtip`
# replace all \n newline to <br> in html, otherwise it cannot be parsed as newline
# replace all <pre></pre> to code block ```, default type is json for better display
res = (
html.replace("<code>", "`")
.replace("</code>", "`")
.replace("\n", "<br>")
.replace("<pre>", "```json<br>")
.replace("</pre>", "```<br>")
)
        return res
| 34.786517 | 94 | 0.581234 | 5,992 | 0.9677 | 0 | 0 | 0 | 0 | 0 | 0 | 1,688 | 0.27261 |
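The comment block in html2markdown_preprocess above lists the string substitutions applied before handing HTML to markdownify. A tiny illustration of just that step on a made-up snippet (no network access; __new__ is used to skip the fetching done in __init__):

snippet = '<pre>print(<code>x</code>)\n</pre>'
d = Data.__new__(Data)
print(d.html2markdown_preprocess(snippet))
# code tags become backticks, newlines become <br>, and the <pre> block
# becomes a fenced json code block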
7dc0ff0cd5dcfd9cb62fcfb00a3e84da41a487f5 | 1,407 | py | Python | examples/python/WeightedCentroidalVoronoi.py | mparno/sdot2d | f632824fc4f0285eab6de911cca8932f69ece705 | [
"BSD-3-Clause"
]
| null | null | null | examples/python/WeightedCentroidalVoronoi.py | mparno/sdot2d | f632824fc4f0285eab6de911cca8932f69ece705 | [
"BSD-3-Clause"
]
| null | null | null | examples/python/WeightedCentroidalVoronoi.py | mparno/sdot2d | f632824fc4f0285eab6de911cca8932f69ece705 | [
"BSD-3-Clause"
]
| null | null | null |
import pysdot as ot
import numpy as np
import matplotlib.pyplot as plt
numPts = 100
xbnds = [0.0,1.0] # minimum and maximum x values
ybnds = [0.0,1.0] # minimum and maximum y values
Ns = [50,50]
bbox = ot.BoundingBox(xbnds[0],xbnds[1],ybnds[0],ybnds[1])
grid = ot.RegularGrid(bbox, Ns[0], Ns[1])
dens = np.ones(Ns)
for i in range(Ns[0]):
for j in range(Ns[1]):
pt = grid.Center(i,j)
dens[i,j] = np.exp(-30.0*( (pt[0]-0.5)**2 + (pt[1]-0.5)**2))
dist = ot.DiscretizedDistribution(grid,dens)
# Construct the Centroidal Voronoi diagram. This function uses Lloyd's algorithm
# with latin hypercube samples as initial points (https://en.wikipedia.org/wiki/Lloyd%27s_algorithm)
# Arguments to BuildCentroidal are:
# - The bounding box
# - The number of seed points (same as number of cells) in the Voronoi diagram
# - The maximum number of allowed iterations in Lloyd's algorithm
# - A tolerance on the maximum distance between a cell centroid and seed point.
diag = ot.LaguerreDiagram.BuildCentroidal(bbox,numPts,1000,0.001,dist)
areas = diag.Areas(dist)
# Plot the resulting centroidal Voronoi diagram
fig, axs = plt.subplots(ncols=2,figsize=(14,6))
ot.PlotDiagram(diag, axs[0], distribution=dist, cell_colors=areas)
axs[0].set_title('Weighted CVD')
axs[1].imshow(dens.T,extent=[xbnds[0],xbnds[1],ybnds[0],ybnds[1]],origin='lower',alpha=0.8)
axs[1].set_title('Density')
plt.show()
| 33.5 | 100 | 0.713575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 595 | 0.422886 |
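BuildCentroidal above runs Lloyd's algorithm: repeatedly recompute the (weighted) Voronoi cells of the current seeds and move every seed to its cell's centroid. A bare-bones, unweighted version in plain NumPy — Monte-Carlo centroids instead of pysdot's exact cell integrals — just to illustrate the iteration:

import numpy as np

def lloyd(seeds, samples, iters=50):
    seeds = seeds.copy()
    for _ in range(iters):
        # assign each sample point to its nearest seed (i.e. to a Voronoi cell)
        dists = np.linalg.norm(samples[:, None, :] - seeds[None, :, :], axis=2)
        owner = dists.argmin(axis=1)
        # move each seed to the centroid of the samples in its cell
        for k in range(len(seeds)):
            cell = samples[owner == k]
            if len(cell):
                seeds[k] = cell.mean(axis=0)
    return seeds

rng = np.random.default_rng(0)
print(lloyd(rng.random((10, 2)), rng.random((20000, 2)))[:3])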
7dc1969b2d44d9ad370f7f09a3b9e9919cb4e854 | 589 | py | Python | Combinatorialifier.py | Theta291/Partial-Application-in-Python | db503fbf7a1c173c01fca86a858875e38c41997a | [
"MIT"
]
| null | null | null | Combinatorialifier.py | Theta291/Partial-Application-in-Python | db503fbf7a1c173c01fca86a858875e38c41997a | [
"MIT"
]
| null | null | null | Combinatorialifier.py | Theta291/Partial-Application-in-Python | db503fbf7a1c173c01fca86a858875e38c41997a | [
"MIT"
]
| null | null | null |
#Exercise: Try to make a function that accepts a function of only positional arguments and returns a function that takes the same number of positional arguments and, given they are all iterators, attempts every combination of one argument from each iterator.
#Skills: Partial application, Iteration
papplycomboreverse = lambda fun, xiter : lambda *args : [fun(*args, x) for x in xiter]
def combo(fun):
def returnfun(*args):
currfun = fun
for arg in reversed(args):
currfun = papplycomboreverse(currfun, arg)
return currfun()
return returnfun
| 45.307692 | 259 | 0.726655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.505942 |
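A quick check of combo as defined above; note that the result comes back as nested lists, one nesting level per argument, rather than a flat list of combinations:

add3 = lambda a, b, c: a + b + c
print(combo(add3)([1, 2], [10], [100, 200]))
# [[[111, 211]], [[112, 212]]]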
7dc20b2c7644374b96c9964dd3914cc3c9615ccd | 237 | py | Python | ursa/graph/node.py | adgirish/ursa | c14fccacb81efd33e86453f979cb4ec799aa8a3a | [
"Apache-2.0"
]
| null | null | null | ursa/graph/node.py | adgirish/ursa | c14fccacb81efd33e86453f979cb4ec799aa8a3a | [
"Apache-2.0"
]
| null | null | null | ursa/graph/node.py | adgirish/ursa | c14fccacb81efd33e86453f979cb4ec799aa8a3a | [
"Apache-2.0"
]
| null | null | null |
class Node:
"""
This object is a generic node, the basic component of a Graph.
Fields:
data -- the data this node will contain. This data can be any format.
"""
def __init__(self, data):
self.data = data
| 23.7 | 73 | 0.620253 | 236 | 0.995781 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.696203 |
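A trivial usage example of the Node container above; per the docstring, the payload can be any Python object:

n = Node({'name': 'Seattle', 'population': 737015})
print(n.data['name'])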
7dc217926986aef9243e5b82602418597122bc4f | 6,780 | py | Python | api/api_funct.py | pjclock/haproxy-wi | 2ea59c892ae24d824d29dd0cee580c969f64cc87 | [
"Apache-2.0"
]
| null | null | null | api/api_funct.py | pjclock/haproxy-wi | 2ea59c892ae24d824d29dd0cee580c969f64cc87 | [
"Apache-2.0"
]
| null | null | null | api/api_funct.py | pjclock/haproxy-wi | 2ea59c892ae24d824d29dd0cee580c969f64cc87 | [
"Apache-2.0"
]
| 1 | 2019-11-19T14:59:25.000Z | 2019-11-19T14:59:25.000Z |
import os
import sys
os.chdir(os.path.dirname(__file__))
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(sys.path[0], '/var/www/haproxy-wi/app/'))
from bottle import route, run, template, hook, response, request, post
import sql
import funct
def return_dict_from_out(id, out):
data = {}
data[id] = {}
for k in out:
if "Ncat:" not in k:
k = k.split(':')
data[id][k[0]] = k[1].strip()
else:
data[id] = {"error":"Can\'t connect to HAproxy"}
return data
def check_permit_to_server(id):
servers = sql.select_servers(id_hostname=id)
login = request.headers.get('login')
for s in servers:
servers = sql.get_dick_permit(username=login, ip=s[2])
return servers
def get_server(id):
data = {}
try:
servers = check_permit_to_server(id)
for s in servers:
data = {
'id':s[0],
'hostname':s[1],
'ip':s[2],
'group':s[3],
'virt':s[4],
'enable':s[5],
'master':s[6],
'creds':s[7]
}
except:
server = data
return dict(server=data)
def get_status(id):
try:
servers = check_permit_to_server(id)
for s in servers:
cmd = 'echo "show info" |nc %s %s -w 1|grep -e "Ver\|CurrConns\|Maxco\|MB\|Uptime:"' % (s[2], sql.get_setting('haproxy_sock_port'))
out = funct.subprocess_execute(cmd)
data = return_dict_from_out(id, out[0])
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(status=data)
def get_all_statuses():
data = {}
try:
servers = sql.select_servers()
login = request.headers.get('login')
sock_port = sql.get_setting('haproxy_sock_port')
for s in servers:
servers = sql.get_dick_permit(username=login)
for s in servers:
cmd = 'echo "show info" |nc %s %s -w 1|grep -e "Ver\|CurrConns\|Maxco\|MB\|Uptime:"' % (s[2], sock_port)
data[s[2]] = {}
out = funct.subprocess_execute(cmd)
data[s[2]] = return_dict_from_out(s[1], out[0])
except:
data = {"error":"Cannot find the server"}
return dict(error=data)
return dict(status=data)
def actions(id, action):
if action == 'start' or action == 'stop' or action == 'restart':
try:
servers = check_permit_to_server(id)
for s in servers:
cmd = [ "sudo systemctl %s haproxy" % action ]
error = funct.ssh_command(s[2], cmd)
done = error if error else 'done'
data = {'id':s[0],'ip':s[2],'action':action,'hostname':s[1],'status':done}
return dict(status=data)
except:
return dict(status='error')
else:
return dict(status='wrong action')
def runtime(id):
data = {}
try:
action = request.headers.get('action')
haproxy_sock = sql.get_setting('haproxy_sock')
servers = check_permit_to_server(id)
cmd = [ 'echo "%s" |sudo socat stdio %s' % (action, haproxy_sock) ]
for s in servers:
out = funct.ssh_command(s[2], cmd)
data = {}
data[id] = {}
sep_data = out.split('\r\n')
data[id] = {'ouput':sep_data}
return dict(status=data)
except:
return dict(status='error')
def show_backends(id):
data = {}
try:
servers = check_permit_to_server(id)
for s in servers:
out = funct.show_backends(s[2], ret=1)
data = {id: out}
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(backends=data)
def get_config(id):
data = {}
try:
servers = check_permit_to_server(id)
for s in servers:
cfg = '/tmp/'+s[2]+'.cfg'
out = funct.get_config(s[2], cfg)
os.system("sed -i 's/\\n/\n/g' "+cfg)
try:
conf = open(cfg, "r")
config_read = conf.read()
conf.close
except IOError:
conf = '<br />Can\'t read import config file'
data = {id: config_read}
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(config=data)
def upload_config(id):
data = {}
body = request.body.getvalue().decode('utf-8')
save = request.headers.get('action')
	login = request.headers.get('login')
	hap_configs_dir = funct.get_config_var('configs', 'haproxy_save_configs_dir')  # needed below to build cfg_for_save (mirrors add_to_config)
if save == '':
save = 'save'
elif save == 'restart':
save = ''
try:
servers = check_permit_to_server(id)
for s in servers:
ip = s[2]
cfg = '/tmp/'+ip+'.cfg'
cfg_for_save = hap_configs_dir + ip + "-" + funct.get_data('config') + ".cfg"
try:
with open(cfg, "w") as conf:
conf.write(body)
return_mess = 'config was uploaded'
os.system("/bin/cp %s %s" % (cfg, cfg_for_save))
out = funct.upload_and_restart(ip, cfg, just_save=save)
funct.logging('localhost', " config was uploaded via REST API", login=login)
if out:
					return_mess = out
except IOError:
return_mess = "cannot upload config"
data = {id: return_mess}
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(config=data)
def add_to_config(id):
data = {}
body = request.body.getvalue().decode('utf-8')
save = request.headers.get('action')
hap_configs_dir = funct.get_config_var('configs', 'haproxy_save_configs_dir')
login = request.headers.get('login')
if save == '':
save = 'save'
elif save == 'restart':
save = ''
try:
servers = check_permit_to_server(id)
for s in servers:
ip = s[2]
cfg = '/tmp/'+ip+'.cfg'
cfg_for_save = hap_configs_dir + ip + "-" + funct.get_data('config') + ".cfg"
out = funct.get_config(ip, cfg)
try:
with open(cfg, "a") as conf:
conf.write('\n'+body+'\n')
return_mess = 'section was added to the config'
os.system("/bin/cp %s %s" % (cfg, cfg_for_save))
funct.logging('localhost', " section was added via REST API", login=login)
out = funct.upload_and_restart(ip, cfg, just_save=save)
if out:
return_mess = out
except IOError:
return_mess = "cannot upload config"
data = {id: return_mess}
except:
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(config=data)
def show_log(id):
data = {}
rows = request.headers.get('rows')
waf = request.headers.get('waf')
grep = request.headers.get('grep')
hour = request.headers.get('starthour')
minut = request.headers.get('startminut')
hour1 = request.headers.get('endhour')
minut1 = request.headers.get('endminut')
if rows is None:
rows = '10'
if waf is None:
waf = '0'
if hour is None:
hour = '00'
if minut is None:
minut = '00'
if hour1 is None:
hour1 = '24'
if minut1 is None:
minut1 = '00'
try:
servers = check_permit_to_server(id)
for s in servers:
ip = s[2]
except:
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
out = funct.show_haproxy_log(ip, rows=rows, waf=str(waf), grep=grep, hour=str(hour), minut=str(minut), hour1=str(hour1), minut1=str(minut1), html=0)
data = {id: out}
return dict(log=data)
| 22.450331 | 149 | 0.629499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,300 | 0.19174 |
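return_dict_from_out above converts the lines returned by HAProxy's "show info" stats socket into a nested dict keyed by server id; any line containing "Ncat:" is treated as a failed connection. For illustration, with made-up values:

out = ['Version: 2.0.13', 'CurrConns: 7', 'Maxconn: 2000', 'Uptime: 12d 3h']
print(return_dict_from_out('1', out))
# {'1': {'Version': '2.0.13', 'CurrConns': '7', 'Maxconn': '2000', 'Uptime': '12d 3h'}}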
7dc490740f712aa8ee9b1a1e793a10bb7cab5ed9 | 27,885 | py | Python | trove-11.0.0/trove/guestagent/datastore/experimental/vertica/service.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
]
| 1 | 2020-04-08T07:42:19.000Z | 2020-04-08T07:42:19.000Z | trove/guestagent/datastore/experimental/vertica/service.py | ttcong/trove | 1db2dc63fdd5409eafccebe79ff2900d0535ed13 | [
"Apache-2.0"
]
| 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | trove/guestagent/datastore/experimental/vertica/service.py | ttcong/trove | 1db2dc63fdd5409eafccebe79ff2900d0535ed13 | [
"Apache-2.0"
]
| 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
from oslo_log import log as logging
from oslo_utils import netutils
from six.moves import configparser
from trove.common import cfg
from trove.common.db import models
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec
from trove.common import utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.vertica import system
from trove.guestagent.datastore import service
from trove.guestagent import pkg
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
packager = pkg.Package()
DB_NAME = 'db_srvr'
MOUNT_POINT = CONF.vertica.mount_point
# We will use a fake configuration file for the options managed through
# configuration groups that we apply directly with ALTER DB ... SET ...
FAKE_CFG = os.path.join(MOUNT_POINT, "vertica.cfg.fake")
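# Illustrative only: a user override such as {"ActivePartitionCount": "2"} applied through
# apply_overrides() below is persisted against FAKE_CFG and pushed to the running database
# via the SQL template in system.ALTER_DB_CFG (roughly an ALTER DATABASE ... SET ...
# statement); the parameter name here is an assumption, not something this module defines.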
class VerticaAppStatus(service.BaseDbStatus):
def _get_actual_db_status(self):
"""Get the status of dbaas and report it back."""
try:
out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
system.VERTICA_ADMIN)
if out.strip() == DB_NAME:
# UP status is confirmed
LOG.info("Service Status is RUNNING.")
return rd_instance.ServiceStatuses.RUNNING
else:
LOG.info("Service Status is SHUTDOWN.")
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
LOG.exception("Failed to get database status.")
return rd_instance.ServiceStatuses.CRASHED
class VerticaApp(object):
"""Prepares DBaaS on a Guest container."""
def __init__(self, status):
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
revision_dir = \
guestagent_utils.build_file_path(
os.path.join(MOUNT_POINT,
os.path.dirname(system.VERTICA_ADMIN)),
ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
if not operating_system.exists(FAKE_CFG):
operating_system.write_file(FAKE_CFG, '', as_root=True)
operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
system.VERTICA_ADMIN_GRP, as_root=True)
operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
as_root=True)
self.configuration_manager = \
ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
system.VERTICA_ADMIN_GRP,
PropertiesCodec(delimiter='='),
requires_root=True,
override_strategy=ImportOverrideStrategy(
revision_dir, "cnf"))
def update_overrides(self, context, overrides, remove=False):
if overrides:
self.apply_overrides(overrides)
def remove_overrides(self):
config = self.configuration_manager.get_user_override()
self._reset_config(config)
self.configuration_manager.remove_user_override()
def apply_overrides(self, overrides):
self.configuration_manager.apply_user_override(overrides)
self._apply_config(overrides)
def _reset_config(self, config):
try:
db_password = self._get_database_password()
for k, v in config.items():
alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k))
out, err = system.exec_vsql_command(db_password, alter_db_cmd)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to remove config %s") % k)
except Exception:
LOG.exception("Vertica configuration remove failed.")
raise RuntimeError(_("Vertica configuration remove failed."))
LOG.info("Vertica configuration reset completed.")
def _apply_config(self, config):
try:
db_password = self._get_database_password()
for k, v in config.items():
alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v))
out, err = system.exec_vsql_command(db_password, alter_db_cmd)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to apply config %s") % k)
except Exception:
LOG.exception("Vertica configuration apply failed")
raise RuntimeError(_("Vertica configuration apply failed"))
LOG.info("Vertica config apply completed.")
def _enable_db_on_boot(self):
try:
command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
(system.SET_RESTART_POLICY % (DB_NAME, "always"))]
subprocess.Popen(command)
command = ["sudo", "su", "-", "root", "-c",
(system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
subprocess.Popen(command)
except Exception:
LOG.exception("Failed to enable database on boot.")
raise RuntimeError(_("Could not enable database on boot."))
def _disable_db_on_boot(self):
try:
command = (system.SET_RESTART_POLICY % (DB_NAME, "never"))
system.shell_execute(command, system.VERTICA_ADMIN)
command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
system.shell_execute(command)
except exception.ProcessExecutionError:
LOG.exception("Failed to disable database on boot.")
raise RuntimeError(_("Could not disable database on boot."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
"""Stop the database."""
LOG.info("Stopping Vertica.")
if do_not_start_on_reboot:
self._disable_db_on_boot()
try:
# Stop vertica-agent service
command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
system.shell_execute(command)
# Using Vertica adminTools to stop db.
db_password = self._get_database_password()
stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
system.VERTICA_ADMIN)
if out.strip() == DB_NAME:
system.shell_execute(stop_db_command, system.VERTICA_ADMIN)
if not self.status._is_restarting:
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error("Could not stop Vertica.")
self.status.end_restart()
raise RuntimeError(_("Could not stop Vertica!"))
LOG.debug("Database stopped.")
else:
LOG.debug("Database is not running.")
except exception.ProcessExecutionError:
LOG.exception("Failed to stop database.")
raise RuntimeError(_("Could not stop database."))
def start_db(self, update_db=False):
"""Start the database."""
LOG.info("Starting Vertica.")
try:
self._enable_db_on_boot()
# Start vertica-agent service
command = ["sudo", "su", "-", "root", "-c",
(system.VERTICA_AGENT_SERVICE_COMMAND % "start")]
subprocess.Popen(command)
# Using Vertica adminTools to start db.
db_password = self._get_database_password()
start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
(system.START_DB % (DB_NAME, db_password))]
subprocess.Popen(start_db_command)
if not self.status._is_restarting:
self.status.end_restart()
LOG.debug("Database started.")
except Exception as e:
raise RuntimeError(_("Could not start Vertica due to %s") % e)
def start_db_with_conf_changes(self, config_contents):
"""
Currently all that this method does is to start Vertica. This method
needs to be implemented to enable volume resize on guestagent side.
"""
LOG.info("Starting Vertica with configuration changes.")
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
LOG.info("Initiating config.")
self.configuration_manager.save_configuration(config_contents)
self.start_db(True)
def restart(self):
"""Restart the database."""
try:
self.status.begin_restart()
self.stop_db()
self.start_db()
finally:
self.status.end_restart()
def add_db_to_node(self, members=netutils.get_my_ipv4()):
"""Add db to host with admintools"""
LOG.info("Calling admintools to add DB to host")
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.ADD_DB_TO_NODE % (members,
DB_NAME,
db_password))
system.shell_execute(create_db_command, "dbadmin")
except exception.ProcessExecutionError:
# Give vertica some time to get the node up, won't be available
# by the time adminTools -t db_add_node completes
LOG.info("adminTools failed as expected - wait for node")
self.wait_for_node_status()
LOG.info("Vertica add db to host completed.")
def remove_db_from_node(self, members=netutils.get_my_ipv4()):
"""Remove db from node with admintools"""
LOG.info("Removing db from node")
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.REMOVE_DB_FROM_NODE % (members,
DB_NAME,
db_password))
system.shell_execute(create_db_command, "dbadmin")
except exception.ProcessExecutionError:
# Give vertica some time to get the node up, won't be available
# by the time adminTools -t db_add_node completes
LOG.info("adminTools failed as expected - wait for node")
# Give vertica some time to take the node down - it won't be available
# by the time adminTools -t db_add_node completes
self.wait_for_node_status()
LOG.info("Vertica remove host from db completed.")
def create_db(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
LOG.info("Creating database on Vertica host.")
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.CREATE_DB % (members, DB_NAME,
MOUNT_POINT, MOUNT_POINT,
db_password))
system.shell_execute(create_db_command, system.VERTICA_ADMIN)
except Exception:
LOG.exception("Vertica database create failed.")
raise RuntimeError(_("Vertica database create failed."))
LOG.info("Vertica database create completed.")
def install_vertica(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
LOG.info("Installing Vertica Server.")
try:
# Create db after install
install_vertica_cmd = (system.INSTALL_VERTICA % (members,
MOUNT_POINT))
system.shell_execute(install_vertica_cmd)
except exception.ProcessExecutionError:
LOG.exception("install_vertica failed.")
raise RuntimeError(_("install_vertica failed."))
self._generate_database_password()
LOG.info("install_vertica completed.")
def update_vertica(self, command, members=netutils.get_my_ipv4()):
LOG.info("Calling update_vertica with command %s", command)
try:
update_vertica_cmd = (system.UPDATE_VERTICA % (command, members,
MOUNT_POINT))
system.shell_execute(update_vertica_cmd)
except exception.ProcessExecutionError:
LOG.exception("update_vertica failed.")
raise RuntimeError(_("update_vertica failed."))
# self._generate_database_password()
LOG.info("update_vertica completed.")
def add_udls(self):
"""Load the user defined load libraries into the database."""
LOG.info("Adding configured user defined load libraries.")
password = self._get_database_password()
loaded_udls = []
for lib in system.UDL_LIBS:
func_name = lib['func_name']
lib_name = lib['lib_name']
language = lib['language']
factory = lib['factory']
path = lib['path']
if os.path.isfile(path):
LOG.debug("Adding the %(func)s library as %(lib)s.",
{'func': func_name, 'lib': lib_name})
out, err = system.exec_vsql_command(
password,
system.CREATE_LIBRARY % (lib_name, path)
)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create library %s.")
% lib_name)
out, err = system.exec_vsql_command(
password,
system.CREATE_SOURCE % (func_name, language,
factory, lib_name)
)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create source %s.")
% func_name)
loaded_udls.append(func_name)
else:
LOG.warning("Skipping %(func)s as path %(path)s not "
"found.", {"func": func_name, "path": path})
LOG.info("The following UDL functions are available for use: %s",
loaded_udls)
def _generate_database_password(self):
"""Generate and write the password to vertica.cnf file."""
config = configparser.ConfigParser()
config.add_section('credentials')
config.set('credentials', 'dbadmin_password',
utils.generate_random_password())
self.write_config(config)
def write_config(self, config,
unlink_function=os.unlink,
temp_function=tempfile.NamedTemporaryFile):
"""Write the configuration contents to vertica.cnf file."""
LOG.debug('Defining config holder at %s.', system.VERTICA_CONF)
tempfile = temp_function('w', delete=False)
try:
config.write(tempfile)
tempfile.close()
command = (("install -o root -g root -m 644 %(source)s %(target)s"
) % {'source': tempfile.name,
'target': system.VERTICA_CONF})
system.shell_execute(command)
unlink_function(tempfile.name)
except Exception:
unlink_function(tempfile.name)
raise
def read_config(self):
"""Reads and returns the Vertica config."""
try:
config = configparser.ConfigParser()
config.read(system.VERTICA_CONF)
return config
except Exception:
LOG.exception("Failed to read config %s.", system.VERTICA_CONF)
raise RuntimeError
def _get_database_password(self):
"""Read the password from vertica.cnf file and return it."""
return self.read_config().get('credentials', 'dbadmin_password')
def install_if_needed(self, packages):
"""Install Vertica package if needed."""
LOG.info("Preparing Guest as Vertica Server.")
if not packager.pkg_is_installed(packages):
LOG.debug("Installing Vertica Package.")
packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)
def _set_readahead_for_disks(self):
"""This method sets readhead size for disks as needed by Vertica."""
device = volume.VolumeDevice(CONF.device_path)
device.set_readahead_size(CONF.vertica.readahead_size)
LOG.debug("Set readhead size as required by Vertica.")
def prepare_for_install_vertica(self):
"""This method executes preparatory methods before
executing install_vertica.
"""
command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin "
"VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python"
" -m vertica.local_coerce" %
(system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP))
try:
self._set_readahead_for_disks()
system.shell_execute(command)
except exception.ProcessExecutionError:
LOG.exception("Failed to prepare for install_vertica.")
raise
def mark_design_ksafe(self, k):
"""Wrapper for mark_design_ksafe function for setting k-safety """
LOG.info("Setting Vertica k-safety to %s", str(k))
out, err = system.exec_vsql_command(self._get_database_password(),
system.MARK_DESIGN_KSAFE % k)
# Only fail if we get an ERROR as opposed to a warning complaining
# about setting k = 0
if "ERROR" in err:
LOG.error(err)
raise RuntimeError(_("Failed to set k-safety level %s.") % k)
def _create_user(self, username, password, role=None):
"""Creates a user, granting and enabling the given role for it."""
LOG.info("Creating user in Vertica database.")
out, err = system.exec_vsql_command(self._get_database_password(),
system.CREATE_USER %
(username, password))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create user %s.") % username)
if role:
self._grant_role(username, role)
def _grant_role(self, username, role):
"""Grants a role to the user on the schema."""
out, err = system.exec_vsql_command(self._get_database_password(),
system.GRANT_TO_USER
% (role, username))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to grant role %(r)s to user "
"%(u)s.")
% {'r': role, 'u': username})
out, err = system.exec_vsql_command(self._get_database_password(),
system.ENABLE_FOR_USER
% (username, role))
if err:
LOG.warning(err)
def enable_root(self, root_password=None):
"""Resets the root password."""
LOG.info("Enabling root.")
user = models.DatastoreUser.root(password=root_password)
if not self.is_root_enabled():
self._create_user(user.name, user.password, 'pseudosuperuser')
else:
LOG.debug("Updating %s password.", user.name)
try:
out, err = system.exec_vsql_command(
self._get_database_password(),
system.ALTER_USER_PASSWORD % (user.name, user.password))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to update %s "
"password.") % user.name)
except exception.ProcessExecutionError:
LOG.error("Failed to update %s password.", user.name)
raise RuntimeError(_("Failed to update %s password.")
% user.name)
return user.serialize()
def is_root_enabled(self):
"""Return True if root access is enabled else False."""
LOG.debug("Checking is root enabled.")
try:
out, err = system.shell_execute(system.USER_EXISTS %
(self._get_database_password(),
'root'), system.VERTICA_ADMIN)
if err:
LOG.error(err)
raise RuntimeError(_("Failed to query for root user."))
except exception.ProcessExecutionError:
raise RuntimeError(_("Failed to query for root user."))
return out.rstrip() == "1"
def get_public_keys(self, user):
"""Generates key (if not found), and sends public key for user."""
LOG.debug("Public keys requested for user: %s.", user)
user_home_directory = os.path.expanduser('~' + user)
public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub'
try:
key_generate_command = (system.SSH_KEY_GEN % user_home_directory)
system.shell_execute(key_generate_command, user)
except exception.ProcessExecutionError:
LOG.debug("Cannot generate key.")
try:
read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
out, err = system.shell_execute(read_key_cmd)
except exception.ProcessExecutionError:
LOG.exception("Cannot read public key.")
raise
return out.strip()
def authorize_public_keys(self, user, public_keys):
"""Adds public key to authorized_keys for user."""
LOG.debug("public keys to be added for user: %s.", user)
user_home_directory = os.path.expanduser('~' + user)
authorized_file_name = user_home_directory + '/.ssh/authorized_keys'
try:
read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name})
out, err = system.shell_execute(read_key_cmd)
public_keys.append(out.strip())
except exception.ProcessExecutionError:
LOG.debug("Cannot read authorized_keys.")
all_keys = '\n'.join(public_keys) + "\n"
try:
with tempfile.NamedTemporaryFile("w", delete=False) as tempkeyfile:
tempkeyfile.write(all_keys)
copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s"
) % {'user': user, 'source': tempkeyfile.name,
'target': authorized_file_name})
system.shell_execute(copy_key_cmd)
os.remove(tempkeyfile.name)
except exception.ProcessExecutionError:
LOG.exception("Cannot install public keys.")
os.remove(tempkeyfile.name)
raise
def _export_conf_to_members(self, members):
"""This method exports conf files to other members."""
try:
for member in members:
COPY_CMD = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF,
member,
system.VERTICA_CONF))
system.shell_execute(COPY_CMD)
except exception.ProcessExecutionError:
LOG.exception("Cannot export configuration.")
raise
def install_cluster(self, members):
"""Installs & configures cluster."""
cluster_members = ','.join(members)
LOG.debug("Installing cluster with members: %s.", cluster_members)
self.install_vertica(cluster_members)
self._export_conf_to_members(members)
LOG.debug("Creating database with members: %s.", cluster_members)
self.create_db(cluster_members)
LOG.debug("Cluster configured on members: %s.", cluster_members)
def grow_cluster(self, members):
"""Adds nodes to cluster."""
cluster_members = ','.join(members)
LOG.debug("Growing cluster with members: %s.", cluster_members)
self.update_vertica("--add-hosts", cluster_members)
self._export_conf_to_members(members)
LOG.debug("Creating database with members: %s.", cluster_members)
self.add_db_to_node(cluster_members)
LOG.debug("Cluster configured on members: %s.", cluster_members)
def shrink_cluster(self, members):
"""Removes nodes from cluster."""
cluster_members = ','.join(members)
LOG.debug("Shrinking cluster with members: %s.", cluster_members)
self.remove_db_from_node(cluster_members)
self.update_vertica("--remove-hosts", cluster_members)
def wait_for_node_status(self, status='UP'):
"""Wait until all nodes are the same status"""
# select node_state from nodes where node_state <> 'UP'
def _wait_for_node_status():
out, err = system.exec_vsql_command(self._get_database_password(),
system.NODE_STATUS % status)
LOG.debug("Polled vertica node states: %s", out)
if err:
LOG.error(err)
raise RuntimeError(_("Failed to query for root user."))
return "0 rows" in out
try:
utils.poll_until(_wait_for_node_status, time_out=600,
sleep_time=15)
except exception.PollTimeOut:
raise RuntimeError(_("Timed out waiting for cluster to "
"change to status %s") % status)
| 45.048465 | 79 | 0.58146 | 26,031 | 0.933513 | 0 | 0 | 0 | 0 | 0 | 0 | 6,792 | 0.243572 |
7dc4cee1dbb027b9999c91c7ea99faa307db2e19 | 15,179 | py | Python | waymo_open_dataset/waymo_detection_dataset.py | abahnasy/IDP | c131a597ad72105f67f0ff8850f4eb8275a9800b | [
"MIT"
]
| null | null | null | waymo_open_dataset/waymo_detection_dataset.py | abahnasy/IDP | c131a597ad72105f67f0ff8850f4eb8275a9800b | [
"MIT"
]
| null | null | null | waymo_open_dataset/waymo_detection_dataset.py | abahnasy/IDP | c131a597ad72105f67f0ff8850f4eb8275a9800b | [
"MIT"
]
| null | null | null | """ Waymo dataset with votes.
Author: Ahmed Bahnasy
Date: 2020
"""
import os
import sys
import numpy as np
import pickle
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..', 'utils'))
from box_util import get_corners_from_labels_array
import pc_util
import waymo_utils
from model_util_waymo import WaymoDatasetConfig
DC = WaymoDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 128 # maximum number of objects allowed per scene
# RAW_LABELS = {0: 'TYPE_UNKNOWN', 1: 'TYPE_VEHICLE' , 2: 'TYPE_PEDESTRIAN', 3: 'TYPE_SIGN', 4: 'TYPE_CYCLIST'}
class WaymoDetectionVotesDataset(Dataset):
def __init__(self, split_set='train', num_points=180000,
use_height=False,
augment=False,
verbose:bool = True):
# self.mapping_labels = {1:0,2:1,4:2} # map dataset labels to our labels to handle discarded classes
# self.excluded_labels = [0,3] # exclude unknowns and signs labels
self.split_set = split_set
self.type2class = {0: 'TYPE_UNKNOWN', 1: 'TYPE_VEHICLE' , 2: 'TYPE_PEDESTRIAN', 3: 'TYPE_SIGN', 4: 'TYPE_CYCLIST'}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.classes = ['TYPE_VEHICLE'] #, 'TYPE_PEDESTRIAN', 'TYPE_CYCLIST']
        self.data_path = os.path.join(BASE_DIR, 'dataset')  # TODO: rename to votes data path
# self.raw_data_path = os.path.join(BASE_DIR, 'dataset')
# access segments dictionary list
# load segments_dict_list dictionary
self.segments_dict_list_path = os.path.join(self.data_path, split_set, 'segments_dict_list')
if not os.path.exists(self.segments_dict_list_path):
raise ValueError('segments Dictionary list is not found, make sure to preprocess the data first')
with open(self.segments_dict_list_path, 'rb') as f:
self.segments_dict_list = pickle.load(f)
self.num_segments = len(self.segments_dict_list)
if verbose: print("No of segments in the dataset is {}".format(len(self.segments_dict_list)))
self.num_frames = 0
for segment_dict in self.segments_dict_list:
# add total number of frames in every segment
self.num_frames += segment_dict['frame_count']
# self.scan_names = sorted(list(set([os.path.basename(x).split("_")[1].split('.')[0] for x in os.listdir(os.path.join(self.data_path, 'training', 'votes'))])))
self.num_points = num_points
self.augment = augment
self.use_height = use_height
def __len__(self):
return self.num_frames
def resolve_idx_to_frame_path(self, idx):
        ''' Resolve a global dataset index to the corresponding frame file path within its segment
'''
frame_idx = idx
for segment_dict in self.segments_dict_list:
if frame_idx >= segment_dict['frame_count']:
frame_idx -= segment_dict['frame_count']
else:
frames_list = os.listdir(os.path.join(self.data_path, self.split_set, segment_dict['id']))
frame_path = os.path.join(self.data_path, self.split_set, segment_dict['id'], frames_list[frame_idx])
if not os.path.exists(frame_path):
raise ValueError("Frame path doesn't exist, error in idx_to_frame_path function")
return frame_path
def filtrate_objects(self, labels):
'''
        labels: Nx8 array containing all annotated objects; rows whose class is not whitelisted are removed
'''
type_whitelist = [self.class2type[i] for i in self.classes]
# remove unwanted classes
rows_to_be_deleted = []
for i in range(labels.shape[0]):
if not labels[i,0] in type_whitelist:
rows_to_be_deleted.append(i)
labels = np.delete(labels, rows_to_be_deleted, 0)
return labels
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_classe_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
frame_data_path = self.resolve_idx_to_frame_path(idx)
segment_id = frame_data_path.split('/')[-2]
frame_idx = frame_data_path.split('/')[-1].split('_')[-1].split('.')[0]
# print('data idx is ', idx)
# print('extracted segment id is ', segment_id)
# print('extracted frame idx is ', frame_idx)
# print("path is ", frame_data_path)
point_cloud = np.load(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx)))['pc'] # Nx3
if not os.path.exists(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx))):
print('this path does not exist !!')
print(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx)))
assert point_cloud.shape[1] == 3
frame_data_path = os.path.join(self.data_path, self.split_set,'{}'.format(segment_id) ,'{}_{}.npz'.format(segment_id, frame_idx))
frame_data = np.load(frame_data_path)
labels = frame_data['labels']
assert labels.shape[1] == 8
# print('labels types before filterations ', labels[:,0])
labels = self.filtrate_objects(labels)
# print('labels types after filterations ', labels[:,0])
# create bboxes matrix
bboxes = np.zeros_like(labels)
for i in range(labels.shape[0]):
# if labels[i,0] in self.excluded_labels: # skip signs and unknown labels
# continue
bboxes[i, 0:3] = labels[i,4:7] #centers
bboxes[i, 3:6] = labels[i,1:4] #lwh
bboxes[i, 6] = labels[i,7] # heading
bboxes[i, 7] = DC.raw2used_labels[labels[i,0]] #label
point_votes = np.load(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id) ,'{}_{}_votes.npz'.format(segment_id, frame_idx)))['point_votes'] # Nx10
assert point_votes.shape[1] == 10
point_cloud = point_cloud[:,0:3]
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
raise NotImplementedError
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = waymo_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is of full length of box edges,
# while in sunrgbd_data.py data dumping we dumped *half* length l,w,h.. so have to time it by 2 here
box3d_size = bbox[3:6]
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = np.transpose(get_corners_from_labels_array(bbox)) # 8 x 3
# import pdb; pdb.set_trace()
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:bboxes.shape[0]] = bboxes[:,-1] # from 0 to 4
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
# ret_dict['scan_idx'] = np.array(idx).astype(np.int64) # TODO: wrong indicator, add frame name and segment name instead
# ret_dict['max_gt_bboxes'] = max_bboxes #ABAHNASY: not used parameter
return ret_dict
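# A hedged sketch of what one sample looks like with the defaults above
# (num_points=180000, MAX_NUM_OBJ=128); shapes follow the __getitem__ docstring:
#
#   sample = WaymoDetectionVotesDataset(use_height=True)[0]
#   sample['point_clouds'].shape     # (180000, 4)  XYZ plus height
#   sample['center_label'].shape     # (128, 3)
#   sample['vote_label'].shape       # (180000, 9)
#   sample['vote_label_mask'].shape  # (180000,)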
def viz_votes(pc, point_votes, point_votes_mask):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_obj_voted2 = pc_obj + point_votes[inds,3:6]
pc_obj_voted3 = pc_obj + point_votes[inds,6:9]
pc_util.write_ply(pc_obj, 'pc_obj.ply')
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1.ply')
pc_util.write_ply(pc_obj_voted2, 'pc_obj_voted2.ply')
pc_util.write_ply(pc_obj_voted3, 'pc_obj_voted3.ply')
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = DC.class2angle(angle_classes[i], angle_residuals[i])
box_size = DC.class2size(size_classes[i], size_residuals[i])
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs.ply')
pc_util.write_ply(label[mask==1,:], 'gt_centroids.ply')
def get_sem_cls_statistics():
""" Compute number of objects for each semantic class """
d = WaymoDetectionVotesDataset(use_height=True, augment=False)
sem_cls_cnt = {}
for i in range(len(d)):
if i%10==0: print(i)
sample = d[i]
pc = sample['point_clouds']
sem_cls = sample['sem_cls_label']
mask = sample['box_label_mask']
for j in sem_cls:
if mask[j] == 0: continue
if sem_cls[j] not in sem_cls_cnt:
sem_cls_cnt[sem_cls[j]] = 0
sem_cls_cnt[sem_cls[j]] += 1
print(sem_cls_cnt)
if __name__=='__main__':
d = WaymoDetectionVotesDataset(use_height=True, augment=False)
# for i in range(len(d)):
sample = d[0]
print(sample['vote_label'].shape, sample['vote_label_mask'].shape)
pc_util.write_ply(sample['point_clouds'], 'pc.ply')
viz_votes(sample['point_clouds'], sample['vote_label'], sample['vote_label_mask'])
viz_obb(sample['point_clouds'], sample['center_label'], sample['box_label_mask'],
sample['heading_class_label'], sample['heading_residual_label'],
sample['size_class_label'], sample['size_residual_label'])
| 46.277439 | 180 | 0.620792 | 11,756 | 0.774491 | 0 | 0 | 0 | 0 | 0 | 0 | 4,609 | 0.303643 |
7dc5dc988616aaca00dd30fca002242eb44adc92 | 2,792 | py | Python | smoketests/tests/test_dir_test.py | erlware-deprecated/sinan | 5172974e75f30bd7050b99ff2e6849501008ec44 | [
"MIT"
]
| 7 | 2015-06-03T19:18:39.000Z | 2022-01-26T10:39:03.000Z | smoketests/tests/test_dir_test.py | ericbmerritt/sinan | 36d89008a332e1283fc9f7081165e91fb0547885 | [
"MIT"
]
| 3 | 2020-06-25T04:15:54.000Z | 2020-06-25T04:16:13.000Z | smoketests/tests/test_dir_test.py | ericbmerritt/sinan | 36d89008a332e1283fc9f7081165e91fb0547885 | [
"MIT"
]
| 3 | 2015-11-27T10:33:31.000Z | 2018-07-31T22:56:32.000Z | import unittest
import sin_testing as st
import pexpect
import os
import errno
class TestFail(st.SmokeTest):
@st.sinan("build")
def build(self, child, app_desc):
if not os.path.isfile(os.path.join(os.getcwd(),
"test", "test_module.erl")):
raise "Nome module file"
child.expect(pexpect.EOF)
if not os.path.isfile(os.path.join(os.getcwd(),
"_build", app_desc.project_name,
"lib", app_desc.project_name + "-" +
app_desc.project_version, "ebin",
"test_module.beam")):
raise "File Not Built"
def output_testdir(self):
path = os.path.join(os.getcwd(), "test")
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else: raise
Module = """
-module(test_module).
-export([test/0]).
test() ->
ok."""
module_file = os.path.join(path, "test_module.erl")
new_file = open(module_file, "w")
new_file.write(Module)
new_file.close()
@st.sinan("gen foo")
def run_custom_gen(self, child, appdesc):
child.expect("your name> ")
child.sendline(appdesc.user_name)
child.expect("your email> ")
child.sendline(appdesc.email)
child.expect('copyright holder \("%s"\)> ' % appdesc.user_name)
child.sendline()
child.expect('project version> ')
child.sendline(appdesc.project_version)
child.expect('Please specify the ERTS version \(".*"\)> ')
child.sendline()
child.expect('Is this a single application project \("n"\)> ')
child.sendline("y")
child.expect('Would you like a build config\? \("y"\)> ')
child.sendline()
child.expect("Project was created, you should be good to go!")
child.expect(pexpect.EOF)
def test_gen_name(self):
appdesc = st.AppDesc(user_name = "Smoke Test Gen",
email = "[email protected]",
copyright_holder = "Smoke Test Copy, LLC.",
# This needs to match the gen name since
# we are overriding it
project_name = "foo",
project_version = "0.134.0.0")
self.run_custom_gen(appdesc)
currentdir = os.getcwd()
projdir = os.path.join(currentdir, appdesc.project_name)
os.chdir(projdir)
self.output_testdir()
self.build(appdesc)
os.chdir(currentdir)
if __name__ == '__main__':
unittest.main()
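# These smoke tests drive a pexpect-controlled sinan session end to end, so they are
# typically run directly (e.g. `python test_dir_test.py`) from an environment where the
# sinan executable is available; that invocation is an assumption based on the
# unittest.main() hook above.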
| 31.370787 | 79 | 0.523997 | 2,676 | 0.958453 | 0 | 0 | 1,439 | 0.515401 | 0 | 0 | 615 | 0.220272 |
7dc85646e762b266d883108a8fd66e58db5c4d2f | 7,362 | py | Python | budgetportal/tests/test_management_commands.py | fluenty/datamanager | 97ba9d58d4527b7d61b730ea4896f09a56e6ae60 | [
"MIT"
]
| null | null | null | budgetportal/tests/test_management_commands.py | fluenty/datamanager | 97ba9d58d4527b7d61b730ea4896f09a56e6ae60 | [
"MIT"
]
| null | null | null | budgetportal/tests/test_management_commands.py | fluenty/datamanager | 97ba9d58d4527b7d61b730ea4896f09a56e6ae60 | [
"MIT"
]
| null | null | null | from budgetportal.models import (
FinancialYear,
Sphere,
Government,
Department,
Programme,
)
from django.core.management import call_command
from django.test import TestCase
from tempfile import NamedTemporaryFile
from StringIO import StringIO
import yaml
class BasicPagesTestCase(TestCase):
def setUp(self):
year = FinancialYear.objects.create(slug="2030-31")
# spheres
national = Sphere.objects.create(financial_year=year, name='National')
provincial = Sphere.objects.create(financial_year=year, name='Provincial')
# governments
self.fake_national_government = Government.objects.create(sphere=national, name='South Africa')
self.fake_provincial_government = Government.objects.create(
sphere=provincial,
name='Free State'
)
def test_load_departments_national(self):
filename = 'budgetportal/tests/test_data/test_management_commands_national_departments.csv'
call_command('load_departments', '2030-31', 'national', filename)
presidency = Department.objects.get(government=self.fake_national_government, name='The Presidency')
self.assertEqual(presidency.vote_number, 1)
self.assertTrue(presidency.is_vote_primary)
self.assertIn("To serve the president", presidency.intro)
self.assertIn("Facilitate a common", presidency.intro)
self.assertTrue(presidency.website_url, 'www.thepresidency.gov.za')
parliament = Department.objects.get(government=self.fake_national_government, vote_number=2)
self.assertEqual(parliament.name, 'Parliament')
self.assertTrue(parliament.is_vote_primary)
self.assertIn("Provide the support services", parliament.intro)
self.assertIn("These are aligned", parliament.intro)
self.assertTrue(parliament.website_url, 'www.parliament.gov.za')
def test_load_departments_provincial(self):
filename = 'budgetportal/tests/test_data/test_management_commands_provincial_departments.csv'
call_command('load_departments', '2030-31', 'provincial', filename)
premier = Department.objects.get(
government=self.fake_provincial_government,
name='Premier'
)
self.assertEqual(premier.vote_number, 1)
self.assertTrue(premier.is_vote_primary)
self.assertIn("Implementing all national legislation within functional areas", premier.intro)
self.assertIn("Leading Free State", premier.intro)
self.assertTrue(premier.website_url, 'www.testpremier.gov.za')
legislature = Department.objects.get(
government=self.fake_provincial_government,
name='Free State Legislature'
)
self.assertEqual(legislature.vote_number, 2)
self.assertTrue(legislature.is_vote_primary)
self.assertIn("The legislative authority of a", legislature.intro)
self.assertIn("The vision of the Free State Legislature", legislature.intro)
self.assertTrue(premier.website_url, 'www.testlegislature.co.za')
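# The same loaders exercised in these tests can be run by hand as Django management
# commands; the CSV paths below are placeholders:
#
#   python manage.py load_departments 2030-31 national departments.csv
#   python manage.py load_programmes 2030-31 national programmes.csv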
class ExportImportProgrammesTestCase(TestCase):
def setUp(self):
self.year = FinancialYear.objects.create(slug="2030-31")
# spheres
national = Sphere.objects.create(financial_year=self.year, name='National')
# governments
south_africa = Government.objects.create(sphere=national, name='South Africa')
self.department = Department.objects.create(
government=south_africa,
name="Some Department",
vote_number=1,
is_vote_primary=True,
intro=""
)
Programme.objects.create(
department=self.department,
name="A programme",
programme_number=1
)
Programme.objects.create(
department=self.department,
name="Another programme",
programme_number=2
)
def test_load_programmes_from_export(self):
"""Test that exported programmes can be loaded correctly"""
with NamedTemporaryFile() as csv_file:
# Download the CSV
response = self.client.get('/2030-31/national/programmes.csv')
self.assertEqual(response.status_code, 200)
csv_file.write(response.content)
csv_file.flush()
# Delete all programmes
Programme.objects.all().delete()
# Create them again
out = StringIO()
result = call_command('load_programmes', '2030-31', 'national', csv_file.name, stdout=out)
result = yaml.load(out.getvalue())
self.assertEqual(result['number_added'], 2)
# Check that it was successful
programme_1 = Programme.objects.get(department=self.department, programme_number=1)
programme_2 = Programme.objects.get(department=self.department, programme_number=2)
self.assertEqual("A programme", programme_1.name)
self.assertEqual("Another programme", programme_2.name)
class ExportImportDepartmentsTestCase(TestCase):
def setUp(self):
self.year = FinancialYear.objects.create(slug="2030-31")
# spheres
national = Sphere.objects.create(financial_year=self.year, name='National')
Sphere.objects.create(financial_year=self.year, name='Provincial')
# governments
self.fake_national_government = Government.objects.create(sphere=national, name='South Africa')
self.department_one = Department.objects.create(
government=self.fake_national_government,
name="Some Department 1",
vote_number=1,
is_vote_primary=True,
intro="",
website_url="test.com"
)
self.department_one = Department.objects.create(
government=self.fake_national_government,
name="Some Department 2",
vote_number=2,
is_vote_primary=False,
intro="",
website_url=None
)
def test_load_departments_from_export(self):
"""Test that exported departments can be loaded correctly
Note: departments export currently do national and provincial, so this only works
because we are not creating any provincial departments prior to exporting. """
with NamedTemporaryFile() as csv_file:
# Download the CSV
response = self.client.get('/2030-31/departments.csv')
self.assertEqual(response.status_code, 200)
csv_file.write(response.content)
csv_file.flush()
# Delete all departments
Department.objects.all().delete()
# Create them again
out = StringIO()
result = call_command('load_departments', '2030-31', 'national', csv_file.name, stdout=out)
result = yaml.load(out.getvalue())
# self.assertEqual(result['number_added'], 2)
# Check that it was successful
dept_1 = Department.objects.get(government=self.fake_national_government, vote_number=1)
dept_2 = Department.objects.get(government=self.fake_national_government, vote_number=2)
self.assertEqual("Some Department 1", dept_1.name)
self.assertEqual("Some Department 2", dept_2.name)
| 40.450549 | 108 | 0.664493 | 7,077 | 0.961288 | 0 | 0 | 0 | 0 | 0 | 0 | 1,690 | 0.229557 |
7dc8bc4931a3fbdfc68cead350a1a5f0c0c77747 | 898 | py | Python | src/fedservice/utils.py | rohe/fedservice | 1460d21217b804cac0f38fa26ffa24bee7cf6dad | [
"Apache-2.0"
]
| 3 | 2018-11-28T12:01:31.000Z | 2020-12-16T21:43:29.000Z | src/fedservice/utils.py | peppelinux/fedservice | 0dc5fd0bd33e181b6a1a9bbef6835b2ce5d2f568 | [
"Apache-2.0"
]
| 13 | 2020-02-10T15:33:37.000Z | 2022-02-01T16:43:36.000Z | src/fedservice/utils.py | peppelinux/fedservice | 0dc5fd0bd33e181b6a1a9bbef6835b2ce5d2f568 | [
"Apache-2.0"
]
| 4 | 2019-05-29T10:04:48.000Z | 2020-10-14T09:52:53.000Z | import json
import logging
import ssl
import sys
from oidcrp.exception import ResponseError
logger = logging.getLogger(__name__)
def load_json(file_name):
with open(file_name) as fp:
js = json.load(fp)
return js
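# Minimal usage sketch (the file name is a placeholder):
#
#   conf = load_json("fed_entity_conf.json")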
def fed_parse_response(instance, info, sformat="", state="", **kwargs):
if sformat in ['jose', 'jws', 'jwe']:
resp = instance.post_parse_response(info, state=state)
if not resp:
logger.error('Missing or faulty response')
raise ResponseError("Missing or faulty response")
return resp
else:
return instance.parse_response(info, sformat, state, **kwargs)
def compact(qsdict):
res = {}
for key, val in qsdict.items():
if isinstance(val, int):
res[key] = val
elif len(val) == 1:
res[key] = val[0]
else:
res[key] = val
return res | 22.45 | 71 | 0.609131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.084633 |
7dc8ee9b4d42b514db819f378b41dbecf850d307 | 3,884 | py | Python | plugins/rd_bot.py | deg4uss3r/rd_bot | f82a929b59331d2ee67067369cd6ffe92a4fa7e6 | [
"MIT"
]
| null | null | null | plugins/rd_bot.py | deg4uss3r/rd_bot | f82a929b59331d2ee67067369cd6ffe92a4fa7e6 | [
"MIT"
]
| 3 | 2016-09-17T09:51:49.000Z | 2019-11-13T20:58:37.000Z | plugins/rd_bot.py | deg4uss3r/rd_bot | f82a929b59331d2ee67067369cd6ffe92a4fa7e6 | [
"MIT"
]
| null | null | null | from __future__ import unicode_literals
import requests
import json
import os
import sys
outputs = []
def get_lat_lng(city):
try:
googleclientSecretFile = open('google_api_key', 'r')
GCLIENTSECRET = googleclientSecretFile.read()
GCLIENTSECRET = GCLIENTSECRET[:-1]
except:
print("fatal error reading google API key")
sys.exit(1)
addr = city.split(' ')
address=''
for i in addr:
address+=i
address+='+'
try:
google_url = 'https://maps.googleapis.com/maps/api/geocode/json?address=' + address + '&key=' + GCLIENTSECRET
g_block = requests.get(google_url)
g = g_block.json()
except:
print("fatal error with google maps request")
sys.exit(1)
g_lat = g['results'][0]['geometry']['location']['lat']
g_lng = g['results'][0]['geometry']['location']['lng']
return g_lat, g_lng
def get_weather(city):
lat,lng = get_lat_lng(city)
APPID = '181c98fe0d98b16f927103e0e0963ef5'
OWM_URL = 'http://api.openweathermap.org/data/2.5/weather?&lat=' + str(lat) + '&lon=' + str(lng) + '&units=imperial&APPID='+APPID
try:
r_block = requests.get(OWM_URL)
r = r_block.json()
except:
print("fatal error with Open Weather Request")
sys.exit(1)
temp = r['main']['temp']
country = r['sys']['country']
city_name = r['name']
response = " Current weather for " + city_name + ", " + country + " " + str(temp)
return response
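# Example trigger handled by process_message() below; the ids and values are made up,
# but the "get weather <city>" phrasing matches the parsing there:
#
#   incoming : "<@U2CEQ0RR6> get weather Toronto"
#   reply    : "<@U123ABC> Current weather for Toronto, CA 55.2"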
def get_beers(city):
try:
clientSecretFile = open('untapped-private-api', 'r')
CLIENTSECRET = clientSecretFile.read()
CLIENTSECRET = CLIENTSECRET[:-1]
except:
print("fatal error reading untappd API key")
sys.exit(1)
lat,lng = get_lat_lng(city)
try:
UNTAPPD = 'https://api.untappd.com/v4/thepub/local/?&client_id=189BD8671F3124A796C4B9C78BB8FED66DA4C4C9&client_secret='+CLIENTSECRET+'&radius=2&lat=' + str(lat) + '&lng=' + str(lng)
except:
print("fatal error with untappd request")
        sys.exit(1)
try:
b_block = requests.get(UNTAPPD)
b = b_block.json()
beer_list = b['response']['checkins']['items']
except:
print("fatal error with parsing untappd response")
sys.exit(1)
beer_return_list = []
beer_exists_flag = False
for i in beer_list:
beer_sentence = "* *" + i['beer']['beer_name'] + "* (" + i['beer']['beer_style'] + ") by *" + i['brewery']['brewery_name'] + "* at *" + i['venue']['venue_name'] + "* on " + i['created_at'][5:-6]
beer_sentence.encode('UTF-8')
beer_name = i['beer']['beer_name']
beer_name.encode('UTF-8')
        # Check whether this beer has already been collected; stop at the first match
        beer_exists_flag = False
        for b in beer_return_list:
            if beer_name in b:
                beer_exists_flag = True
                break
if beer_exists_flag:
continue
else:
beer_return_list.append(beer_sentence)
return beer_return_list
def process_message(data):
channel = data['channel']
try:
content = data['text']
user = '<@'+data['user']+'>'
except:
outputs.append([channel, "sorry " + user + "something went really wrong"])
print("fatal error parsing slack input")
sys.exit(1)
content = content.lower()
if content[:12] == '<@u2ceq0rr6>' and 'get weather' in content:
city = content[content.index('weather')+8:]
output = user
output += get_weather(city)
outputs.append([channel, output])
if content[:12] == '<@u2ceq0rr6>' and 'get beer' in content:
city = content[content.index('beer')+5:]
beer_list = get_beers(city)
output = user+'\n'
for i in beer_list:
output += i+'\n'
outputs.append([channel, output])
| 28.77037 | 205 | 0.583162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,080 | 0.278064 |
7dcb26b78425a819dd55aa11592a32323ece117a | 6,855 | py | Python | swss.py | andycranston/swss | 22db6b2e2eb5711d4fd06bd7a094342ad6be8b62 | [
"MIT"
]
| null | null | null | swss.py | andycranston/swss | 22db6b2e2eb5711d4fd06bd7a094342ad6be8b62 | [
"MIT"
]
| null | null | null | swss.py | andycranston/swss | 22db6b2e2eb5711d4fd06bd7a094342ad6be8b62 | [
"MIT"
]
| null | null | null | #! /usr/bin/python3
#
# @(!--#) @(#) swss.py, version 002, 27-july-2018
#
# open a series of home pages and take a screen shot of each one
#
################################################################################################
#
# imports
#
import sys
import os
import argparse
import glob
import shutil
import tempfile
import time
import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import *
################################################################################################
def expandips(ip):
octets = ip.split('.')
if len(octets) != 4:
return [ip]
lastoctet = octets[3]
if lastoctet.find('-') == -1:
return [ip]
startend = lastoctet.split('-')
if len(startend) != 2:
return [ip]
try:
start = int(startend[0])
end = int(startend[1])
except ValueError:
return [ip]
iplist = []
while start <= end:
iplist.append("{}.{}.{}.{}".format(octets[0], octets[1], octets[2], start))
start += 1
return iplist
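# For example, expandips("192.168.1.10-12") returns
# ["192.168.1.10", "192.168.1.11", "192.168.1.12"], while anything that does not parse as
# a dashed final octet comes back unchanged as a one-element list.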
#########################################################################
def readipsfromfile(filename):
global progname
funcname = 'readipsfromfile'
try:
file = open(filename, "r", encoding="utf-8")
except IOError:
print("{}: {}: cannot open IP address/hostname file \"{}\" for reading".format(filename))
sys.exit(2)
iplist = []
for line in file:
line = line.strip()
if line == "":
continue
iplist.append(line)
return iplist
#########################################################################
def leadzeroip(ip):
octets = ip.split('.')
if len(octets) != 4:
return ip
for octet in octets:
try:
dummy = int(octet)
except ValueError:
return ip
return "{:03d}-{:03d}-{:03d}-{:03d}".format(int(octets[0]), int(octets[1]), int(octets[2]), int(octets[3]))
#########################################################################
def csv(s):
r = '"'
for c in s:
if c == '"':
r += '""'
else:
r += c
r += '"'
return r
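# For example, csv('say "hi"') returns '"say ""hi"""': the value is wrapped in double
# quotes and embedded quotes are doubled.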
#########################################################################
def deletefile(filename):
try:
os.remove(filename)
except FileNotFoundError:
pass
return
#########################################################################
def logmsg(logfile, msg):
global progname
print("{}: {}".format(progname, msg))
print("{}: {}".format(datetime.datetime.now(), msg), file=logfile)
logfile.flush()
return
#########################################################################
def swss(ipaddresses, port, logfile, csvfile):
global progname
funcname = 'wss'
logmsg(logfile, "starting FireFox browser")
browser = webdriver.Firefox()
browser.implicitly_wait(10)
browser.set_window_position(0, 0)
browser.maximize_window()
logmsg(logfile, "FireFox started")
for ip in ipaddresses:
# construct URL and port number
if (port == "http") or (port == "80"):
url = "http://{}/".format(ip)
portnum = "80"
elif (port == "https") or (port == "443"):
url = "https://{}/".format(ip)
portnum = "443"
else:
url = "http://{}:{}/".format(ip, port)
portnum = port
# get the url
logmsg(logfile, "getting URL \"{}\" on port number {}".format(url, portnum))
try:
browser.get(url)
logmsg(logfile, "waiting for page load to settle")
time.sleep(2.0)
geterrortext = ""
except WebDriverException as exception:
geterrortext = str(exception).strip()
logmsg(logfile, "error getting URL \"{}\" - \"{}\"".format(url, geterrortext))
# get the page title
try:
title = browser.title
logmsg(logfile, "page title is \"{}\"".format(title))
except UnexpectedAlertPresentException:
title = "<error getting title>"
logmsg(logfile, title)
# take a screenshot
screenshotfilename = 'swss-{}-{}.png'.format(leadzeroip(ip), portnum)
logmsg(logfile, "taking screenshot to file \"{}\"".format(screenshotfilename))
deletefile(screenshotfilename)
try:
browser.save_screenshot(screenshotfilename)
except IOError:
screenshotfilename = "Error taking screenshot to file \"{}\"".format(screenshotfilename)
logmsg(logfile, screenshotfilename)
deletefile(screenshotfilename)
print('{},{},{},{},{},{}'.format(csv(leadzeroip(ip)), csv(portnum), csv(url), csv(title), csv(screenshotfilename), csv(geterrortext)), file=csvfile)
csvfile.flush()
logmsg(logfile, "stopping FireFox browser")
browser.quit()
logmsg(logfile, "FireFox stopped")
return
################################################################################################
def main():
funcname = 'main'
parser = argparse.ArgumentParser()
parser.add_argument("--iplist", help="list of IP addresses to visit", required=True)
parser.add_argument("--logfile", help="log file name", default="swss.log")
parser.add_argument("--csvfile", help="CSV file name", default="swss.csv")
parser.add_argument("--port", help="port (http/https/port#)", default="https")
args = parser.parse_args()
try:
logfile = open(args.logfile, 'w', encoding='utf-8')
except IOError:
print("{}: {}: unable to open log file name \"{}\" for writing".format(progname, funcname, args.logfile))
sys.exit(2)
try:
csvfile = open(args.csvfile, 'w', encoding='utf-8')
except IOError:
print("{}: {}: unable to open CSV file name \"{}\" for writing".format(progname, funcname, args.csvfile))
sys.exit(2)
ipaddresses = []
for ip in args.iplist.split(','):
if ip.find('-') != -1:
ipaddresses.extend(expandips(ip))
elif ip.find('+') == 0:
ipaddresses.extend(readipsfromfile(ip[1:]))
else:
ipaddresses.append(ip)
swss(ipaddresses, args.port, logfile, csvfile)
logfile.close()
csvfile.close()
return 0
##########################################################################
progname = os.path.basename(sys.argv[0])
sys.exit(main())
# end of file
| 26.467181 | 156 | 0.484318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,946 | 0.28388 |
7dcb91ee413942a4aa1e8d201a6cf906a3130f7f | 1,041 | py | Python | api/migrations/versions/0be658f07ac6_state_consumed.py | eve-git/namex | 130f261500ce595b291d5428c32e1f6cc38ea505 | [
"Apache-2.0"
]
| 4 | 2018-10-05T23:41:05.000Z | 2019-06-19T16:17:50.000Z | api/migrations/versions/0be658f07ac6_state_consumed.py | eve-git/namex | 130f261500ce595b291d5428c32e1f6cc38ea505 | [
"Apache-2.0"
]
| 635 | 2018-05-31T04:12:46.000Z | 2022-03-31T18:45:42.000Z | api/migrations/versions/0be658f07ac6_state_consumed.py | thorwolpert/namex | b9d927774e4c0da0255ca5aaa7ed1890283956fd | [
"Apache-2.0"
]
| 71 | 2018-05-14T20:47:55.000Z | 2022-03-31T23:08:30.000Z | """state consumed
Revision ID: 0be658f07ac6
Revises: bd1e892d0609
Create Date: 2021-07-18 21:26:04.588007
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy import String
# revision identifiers, used by Alembic.
revision = '0be658f07ac6'
down_revision = 'bd1e892d0609'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# Create an ad-hoc table to use for the insert statement.
states_table = table('states',
column('cd', String),
column('description', String)
)
op.bulk_insert(
states_table,
[
{'cd': 'CONSUMED', 'description': 'CONSUMED by a corp'}
]
)
# ### end Alembic commands ###
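# Typical invocation for this revision (run from the service's migration environment;
# the exact workflow is an assumption):
#
#   alembic upgrade 0be658f07ac6      # inserts the CONSUMED state
#   alembic downgrade bd1e892d0609    # removes it again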
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("DELETE FROM states WHERE cd = 'CONSUMED';")
# ### end Alembic commands ###
| 24.785714 | 68 | 0.612872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.512008 |
7dcbb0c32530744ec259355b1498ba6ed0c58b39 | 2,878 | py | Python | tests/test_pydantic.py | hCaptcha/hmt-basemodels | 5108bf4ddf68d670607daf8d68302c01631c4be2 | [
"MIT"
]
| 3 | 2020-09-08T15:03:31.000Z | 2021-06-30T19:00:45.000Z | tests/test_pydantic.py | humanprotocol/hmt-basemodels | 5108bf4ddf68d670607daf8d68302c01631c4be2 | [
"MIT"
]
| 43 | 2019-02-28T17:43:42.000Z | 2022-02-13T11:37:08.000Z | tests/test_pydantic.py | hCaptcha/hmt-basemodels | 5108bf4ddf68d670607daf8d68302c01631c4be2 | [
"MIT"
]
| 5 | 2019-05-09T15:58:07.000Z | 2020-12-09T23:24:24.000Z | from unittest import TestCase, mock
from copy import deepcopy
from pydantic.error_wrappers import ValidationError
from basemodels.pydantic import Manifest
from basemodels.pydantic.manifest.data.taskdata import TaskDataEntry
SIMPLE = {
"job_mode": "batch",
"request_type": "image_label_multiple_choice",
"requester_accuracy_target": 0.8,
"requester_description": "pyhcaptcha internal_id: 69efdbe1-e586-42f8-bf05-a5745f75402a",
"requester_max_repeats": 7,
"requester_min_repeats": 3,
"requester_question": {"en": "deploy to only certain sites"},
"requester_restricted_answer_set": {"one": {"en": "one"}},
"task_bid_price": -1,
"unsafe_content": False,
"oracle_stake": 0.05,
"recording_oracle_addr": "0x6a0E68eA5F706339dd6bd354F53EfcB5B9e53E49",
"reputation_oracle_addr": "0x6a0E68eA5F706339dd6bd354F53EfcB5B9e53E49",
"reputation_agent_addr": "0x6a0E68eA5F706339dd6bd354F53EfcB5B9e53E49",
"groundtruth_uri": "https://hmt-jovial-lamport.hcaptcha.com/pyhcaptcha-client/taskdata/sha1:bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f.json",
"taskdata_uri": "https://hmt-jovial-lamport.hcaptcha.com/pyhcaptcha-client/taskdata/sha1:97d170e1550eee4afc0af065b78cda302a97674c.json",
"job_total_tasks": 0,
"job_api_key": "417714f0-7ce6-412b-b394-0d2ae58a8c6d",
"restricted_audience": {
"sitekey": [
{"dfe03e7c-f417-4726-8b14-ae033a3cc66e": {"score": 1}},
{"dfe03e7c-f417-4726-8b12-ae033a3cc66a": {"score": 1}},
]
},
}
TASK = {
"task_key": "407fdd93-687a-46bb-b578-89eb96b4109d",
"datapoint_uri": "https://domain.com/file1.jpg",
"datapoint_hash": "f4acbe8562907183a484498ba901bfe5c5503aaa",
"metadata": {
"key_1": "value_1",
"key_2": "value_2",
}
}
class PydanticTest(TestCase):
def setUp(self):
self.m = deepcopy(SIMPLE)
def test_example_err(self):
self.m["requester_question_example"] = []
with self.assertRaises(ValidationError):
Manifest.parse_obj(self.m)
def test_working(self):
Manifest.parse_obj(self.m)
def test_unique_id(self):
m1 = deepcopy(SIMPLE)
m2 = deepcopy(SIMPLE)
self.assertNotEqual(str(Manifest(**m1).job_id), str(Manifest(**m2).job_id))
def test_taskdata(self):
""" Test taskdata """
taskdata = deepcopy(TASK)
TaskDataEntry(**taskdata)
taskdata.get("metadata")["key_1"] = 1.1
TaskDataEntry(**taskdata)
taskdata.get("metadata")["key_1"] = None
TaskDataEntry(**taskdata)
taskdata.get("metadata")["key_1"] = ""
TaskDataEntry(**taskdata)
with self.assertRaises(ValidationError):
taskdata.get("metadata")["key_1"] += 1024 * "a"
TaskDataEntry(**taskdata)
taskdata.pop("metadata")
TaskDataEntry(**taskdata)
| 33.858824 | 143 | 0.673384 | 1,087 | 0.377693 | 0 | 0 | 0 | 0 | 0 | 0 | 1,349 | 0.468728 |
7dcc1a030ac1c718e3ae0328b8bf873af5f2d223 | 1,064 | py | Python | olutils/path.py | OctaveLauby/olutils | 9d0741fe2a3ce527be60be2bf1a6904c3340e488 | [
"Apache-2.0"
]
| 1 | 2020-10-23T17:11:42.000Z | 2020-10-23T17:11:42.000Z | olutils/path.py | OctaveLauby/olutils | 9d0741fe2a3ce527be60be2bf1a6904c3340e488 | [
"Apache-2.0"
]
| 4 | 2019-05-09T12:53:33.000Z | 2020-12-03T13:49:26.000Z | olutils/path.py | OctaveLauby/olutils | 9d0741fe2a3ce527be60be2bf1a6904c3340e488 | [
"Apache-2.0"
]
| null | null | null | from os.path import exists
def get_next_path(path_frmt: str, start: int = 1) -> str:
"""Return next available path based on path_frmt (1 positional-placeholder)"""
return path_frmt.format(get_next_path_index(path_frmt, start=start))
def get_next_path_index(path_frmt: str, start: int = 1) -> int:
"""Get next index of given path format (1 positional-placeholder)
Raises:
ValueError: if path_frmt does not contain one and only one positional-placeholder
such as '{}' or '{:03d}'
"""
try:
# Try some random int to check path_frmt consistency
assert "3823243077" in path_frmt.format(3823243077)
except (IndexError, AssertionError):
# IndexError means more than one placeholder, AssertionError means none
raise ValueError("path_frmt must contain only one positional-placeholder") from None
except KeyError:
raise ValueError("path_frmt must contain no named-placeholder") from None
i = start
while exists(path_frmt.format(i)):
i += 1
return i
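# Minimal usage sketch (illustrative; the "run_{:03d}.log" pattern below is made up):
if __name__ == "__main__":
    # With run_001.log .. run_003.log already present, the next free index is 4 and the
    # next free path is "run_004.log"; a pattern without exactly one positional
    # placeholder (e.g. "run.log" or "{}_{}.log") raises ValueError.
    print(get_next_path_index("run_{:03d}.log"))
    print(get_next_path("run_{:03d}.log"))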
| 36.689655 | 92 | 0.68797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.50282 |
7dccf7de030e74e41a66762279d9a43fa3b28e62 | 63 | py | Python | env/lib/python2.7/site-packages/certifi/__init__.py | wagnermarkd/stationary-hud | 96eb0457e52a7e8a691e8ae101d43353db038f57 | [
"MIT"
]
| 6 | 2021-09-18T07:19:54.000Z | 2021-09-18T07:20:07.000Z | venv/Lib/site-packages/certifi/__init__.py | Airren/mxonline-python | f16c7039b5a8ac7d2d743c83c9f44f77f02e1432 | [
"MIT"
]
| 10 | 2020-01-20T13:52:07.000Z | 2022-03-12T00:12:31.000Z | venv/Lib/site-packages/certifi/__init__.py | Airren/mxonline-python | f16c7039b5a8ac7d2d743c83c9f44f77f02e1432 | [
"MIT"
]
| 1 | 2016-08-24T01:08:34.000Z | 2016-08-24T01:08:34.000Z | from .core import where, old_where
__version__ = "2016.02.28"
| 15.75 | 34 | 0.746032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.190476 |
7dcd9cbc95d9ac46a0346d6a8f8325d12f3bf6be | 681 | py | Python | setup.py | jacobschaer/qt_compat | 8121500c1fb6f95d3cfff033410e055a187a39c9 | [
"MIT"
]
| null | null | null | setup.py | jacobschaer/qt_compat | 8121500c1fb6f95d3cfff033410e055a187a39c9 | [
"MIT"
]
| null | null | null | setup.py | jacobschaer/qt_compat | 8121500c1fb6f95d3cfff033410e055a187a39c9 | [
"MIT"
]
| null | null | null | from setuptools import setup, find_packages
setup(
name="QtCompat",
version="0.1",
packages=find_packages(),
scripts=[],
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=[],
package_data={
},
# metadata for upload to PyPI
author="Jacob Schaer",
author_email="",
description="PyQt4, 5 and Pyside Compatibility Library",
license="MIT",
keywords="pyqt4 pyqt5 pyside compatibility",
url="https://github.com/jacobschaer/qt_compat/", # project home page, if any
# could also include long_description, download_url, classifiers, etc.
) | 28.375 | 82 | 0.690162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.574156 |
7dcde3e58f5df9d50ac28aa03a0ad0516f667fbc | 2,730 | py | Python | test/patterns/joined_validation/test_joined_validation.py | acheshkov/aibolit | eed2fafa9fbc5f3359510cd80fee2ae8311d7ed8 | [
"MIT"
]
| null | null | null | test/patterns/joined_validation/test_joined_validation.py | acheshkov/aibolit | eed2fafa9fbc5f3359510cd80fee2ae8311d7ed8 | [
"MIT"
]
| null | null | null | test/patterns/joined_validation/test_joined_validation.py | acheshkov/aibolit | eed2fafa9fbc5f3359510cd80fee2ae8311d7ed8 | [
"MIT"
]
| null | null | null | import os
from unittest import TestCase
from aibolit.patterns.joined_validation.joined_validation import JoinedValidation
from pathlib import Path
class TestJoinedValidation(TestCase):
dir_path = Path(os.path.realpath(__file__)).parent
pattern = JoinedValidation()
def test_canFindSimpleJoinedValidation(self):
file = Path(self.dir_path, 'SimpleJoinedValidation.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find simple joined validation'
)
def test_canFindJoinedValidationAndOr(self):
file = Path(self.dir_path, 'JoinedValidationAndOr.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation in AndOr condition'
)
def test_canFindJoinedValidationOrAnd(self):
file = Path(self.dir_path, 'JoinedValidationOrAnd.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation in OrAnd condition'
)
def test_canFindJoinedValidationOrOr(self):
file = Path(self.dir_path, 'JoinedValidationOrOr.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation in OrOr condition'
)
def test_canFindJoinedValidationOrFunctionCall(self):
file = Path(self.dir_path, 'JoinedValidationOrFunctionCall.java')
self.assertEqual(
[8],
self.pattern.value(file),
'Could not find joined validation in function call'
)
def test_canFindJoinedValidationOrFieldAccess(self):
file = Path(self.dir_path, 'JoinedValidationOrFieldAccess.java')
self.assertEqual(
[6],
self.pattern.value(file),
'Could not find joined validation in field access'
)
def test_canFindNoBracketsJoinedValidation(self):
file = Path(self.dir_path, 'NoBracketsJoinedValidation.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation when using no brackets'
)
def test_canSkipEmptyJoinedValidation(self):
file = Path(self.dir_path, 'EmptyJoinedValidation.java')
self.assertEqual(
[],
self.pattern.value(file),
'Could not skip empty joined validation'
)
def test_canSkipNoJoinedValidation(self):
file = Path(self.dir_path, 'NoJoinedValidation.java')
self.assertEqual(
[],
self.pattern.value(file),
'Could not skip when there is no joined validation'
)
| 33.292683 | 81 | 0.630037 | 2,580 | 0.945055 | 0 | 0 | 0 | 0 | 0 | 0 | 719 | 0.26337 |
7dce2ac1c38976edf285448acc824af893571906 | 971 | py | Python | python_structure/data_structures/lists_tuples_dictionaries/tuple_defs.py | bangyen/pascal-triangle | 0831348e93c274bdd38bba5c3aeeda7596ab97ee | [
"MIT"
]
| 1 | 2020-03-11T10:20:53.000Z | 2020-03-11T10:20:53.000Z | python_structure/data_structures/lists_tuples_dictionaries/tuple_defs.py | bangyen/pascal-triangle | 0831348e93c274bdd38bba5c3aeeda7596ab97ee | [
"MIT"
]
| 1 | 2020-07-06T15:45:01.000Z | 2020-07-06T15:50:32.000Z | python_structure/data_structures/lists_tuples_dictionaries/tuple_defs.py | bangyen/pascal-triangle | 0831348e93c274bdd38bba5c3aeeda7596ab97ee | [
"MIT"
]
| 1 | 2020-07-02T05:21:58.000Z | 2020-07-02T05:21:58.000Z | """
Global tuple to avoid making a new one each time a method is called
"""
my_tuple = ("London", 123, 18.2)
def city_tuple_declaration():
city = ("Rome", "London", "Tokyo")
return city
def tuple_get_element(index: int):
try:
element = my_tuple[index]
print(element)
except IndexError:
print("index {} out of range".format(index))
def tuple_has_element(element: str) -> bool:
answer = element in my_tuple
return answer
def tuple_has_not_element(element: str) -> bool:
answer = element not in my_tuple
return answer
def bool_to_string_translator(answer: bool) -> str:
if answer:
return "Yes"
else:
return "No"
if __name__ == '__main__':
main_tuple = city_tuple_declaration()
print(main_tuple)
print(my_tuple)
tuple_get_element(5)
print(bool_to_string_translator(tuple_has_element("London")))
print(bool_to_string_translator(tuple_has_not_element("London")))
| 22.068182 | 69 | 0.676622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.164779 |
7dcea3fbbfd1ee77dfca864ce3a07a6ca9ff127e | 389 | py | Python | annotations/filters.py | acdh-oeaw/ner-annotator | ee8f72248669b848eb273644d80ad52dc495a07c | [
"MIT"
]
| 1 | 2019-01-02T15:05:30.000Z | 2019-01-02T15:05:30.000Z | annotations/filters.py | acdh-oeaw/ner-annotator | ee8f72248669b848eb273644d80ad52dc495a07c | [
"MIT"
]
| 8 | 2020-02-11T23:02:04.000Z | 2021-06-10T20:39:58.000Z | annotations/filters.py | acdh-oeaw/ner-annotator | ee8f72248669b848eb273644d80ad52dc495a07c | [
"MIT"
]
| 1 | 2019-01-02T15:05:31.000Z | 2019-01-02T15:05:31.000Z | import django_filters
from . models import NerSample
class NerSampleListFilter(django_filters.FilterSet):
text = django_filters.CharFilter(
lookup_expr='icontains',
help_text=NerSample._meta.get_field('text').help_text,
label=NerSample._meta.get_field('text').verbose_name
)
class Meta:
model = NerSample
fields = ['text', 'id']
| 24.3125 | 62 | 0.678663 | 332 | 0.85347 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.084833 |
7dcf866c0422d8f7d07418dae857b071849168bc | 51 | py | Python | m3o_plugin/postcode.py | JustIceQAQ/play_m3o_in_python | 140b1f07cb574d1f0a2890503ae9e73ce3907f2b | [
"MIT"
]
| null | null | null | m3o_plugin/postcode.py | JustIceQAQ/play_m3o_in_python | 140b1f07cb574d1f0a2890503ae9e73ce3907f2b | [
"MIT"
]
| null | null | null | m3o_plugin/postcode.py | JustIceQAQ/play_m3o_in_python | 140b1f07cb574d1f0a2890503ae9e73ce3907f2b | [
"MIT"
]
| null | null | null | # TODO Postcode: https://m3o.com/postcode/overview
| 25.5 | 50 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.980392 |
7dd0263f686636079c3320a5eff927b93bd01ba9 | 3,016 | py | Python | learning_algorithms/hysteretic_q_matrix.py | swj0418/Reinforcement_Learning_Framework | 1ac6bbe31cee5ea7f1e5f28d8b53aa3985b39db8 | [
"Apache-2.0"
]
| 1 | 2019-07-01T11:47:33.000Z | 2019-07-01T11:47:33.000Z | learning_algorithms/hysteretic_q_matrix.py | swj0418/Reinforcement_Learning_Framework | 1ac6bbe31cee5ea7f1e5f28d8b53aa3985b39db8 | [
"Apache-2.0"
]
| 1 | 2019-04-13T05:46:14.000Z | 2019-04-13T05:46:14.000Z | learning_algorithms/hysteretic_q_matrix.py | swj0418/Reinforcement_Learning_Framework | 1ac6bbe31cee5ea7f1e5f28d8b53aa3985b39db8 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
class Agent:
def __init__(self):
self.q_table = np.zeros(shape=(3, ))
self.rewards = []
self.averaged_rewards = []
self.total_rewards = 0
self.action_cursor = 1
class HystereticAgentMatrix:
def __init__(self, environment, increasing_learning_rate=0.9, decreasing_learning_rate=0.1,
discount_factor=0.9, exploration_rate=0.01):
self.environment = environment
self.discount_factor = discount_factor
self.exploration_rate = exploration_rate
self.increasing_learning_rate = increasing_learning_rate
self.decreasing_learning_rate = decreasing_learning_rate
# Setup q_table
self.num_of_action = self.environment.actions.n
self.states_dim_x = self.environment.states.dim_x
self.states_dim_y = self.environment.states.dim_y
# Agents
self.num_of_agents = 2
self.agents = []
for i in range(self.num_of_agents):
self.agents.append(Agent())
self.steps = 1
def step(self):
actions = []
for agent in self.agents:
# Determine Actions
action = self.get_action(agent)
actions.append(action)
# Take action and update
for agent in self.agents:
# Previous State capture (Previous q value, previous position)
q_p = agent.q_table[agent.action_cursor]
# Take action
obs, reward, done, valid = self.environment.step(action=actions, agent_id=0)
# Update Q-table
bellman_value = reward + self.discount_factor * (np.max(agent.q_table[agent.action_cursor]) - q_p)
if bellman_value >= 0:
new_q = q_p + self.increasing_learning_rate * bellman_value
else:
new_q = q_p + self.decreasing_learning_rate * bellman_value
agent.q_table[agent.action_cursor] = new_q
# self.exploration_rate = self.exploration_rate / self.steps
agent.total_rewards += reward
agent.rewards.append(reward)
if self.steps > 1:
agent.averaged_rewards.append(agent.total_rewards / (self.steps + 5))
self.steps += 1
def set_exploration_rate(self, rate):
self.exploration_rate = rate
def get_action(self, agent):
if np.random.randint(0, 100) / 100 < self.exploration_rate:
# Explore
action = np.random.randint(0, self.num_of_action)
else:
action = np.argmax(agent.q_table)
agent.action_cursor = action
return action
def get_averaged_rewards(self, agent_id=0):
return self.agents[agent_id].averaged_rewards, self.agents[agent_id + 1].averaged_rewards
def get_rewards(self):
return self.agents[0].rewards, self.agents[1].rewards
def reset_reward(self):
for agent in self.agents:
agent.rewards = []
agent.averaged_rewards = [] | 33.511111 | 110 | 0.623342 | 2,994 | 0.992706 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.074934 |
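# Minimal usage sketch (illustrative; `env` stands for any environment object exposing
# `actions.n`, `states.dim_x`, `states.dim_y` and a `step(action=..., agent_id=...)` method
# returning (obs, reward, done, valid), as assumed by the class above):
#   learner = HystereticAgentMatrix(env, increasing_learning_rate=0.9,
#                                   decreasing_learning_rate=0.1, exploration_rate=0.05)
#   for _ in range(10000):
#       learner.step()
#   rewards_a, rewards_b = learner.get_averaged_rewards()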
7dd0a1a9133fdf0ceb0199e1c5e7bef38b12567d | 14,581 | py | Python | psiz/keras/layers/kernel.py | asuiconlab/psiz | 4f05348cf43d2d53ff9cc6dee633de385df883e3 | [
"Apache-2.0"
]
| null | null | null | psiz/keras/layers/kernel.py | asuiconlab/psiz | 4f05348cf43d2d53ff9cc6dee633de385df883e3 | [
"Apache-2.0"
]
| null | null | null | psiz/keras/layers/kernel.py | asuiconlab/psiz | 4f05348cf43d2d53ff9cc6dee633de385df883e3 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of TensorFlow kernel layers.
Classes:
GroupAttention: A simple group-specific attention layer.
Kernel: A kernel that allows the user to separately specify a
distance and similarity function.
AttentionKernel: A kernel that uses group-specific attention
weights and allows the user to separately specify a distance
and similarity function.
GroupAttentionVariational: A variational group attention layer.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
import psiz.keras.constraints as pk_constraints
import psiz.keras.initializers as pk_initializers
from psiz.keras.layers.variational import Variational
from psiz.keras.layers.distances.minkowski import WeightedMinkowski
from psiz.models.base import GroupLevel
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='GroupAttention'
)
class GroupAttention(tf.keras.layers.Layer):
"""Group-specific attention weights."""
def __init__(
self, n_group=1, n_dim=None, fit_group=None,
embeddings_initializer=None, embeddings_regularizer=None,
embeddings_constraint=None, **kwargs):
"""Initialize.
Arguments:
n_dim: An integer indicating the dimensionality of the
embeddings. Must be equal to or greater than one.
n_group (optional): An integer indicating the number of
different population groups in the embedding. A
separate set of attention weights will be inferred for
each group. Must be equal to or greater than one.
fit_group: Boolean indicating if variable is trainable.
shape=(n_group,)
Raises:
ValueError: If `n_dim` or `n_group` arguments are invalid.
"""
super(GroupAttention, self).__init__(**kwargs)
if (n_group < 1):
raise ValueError(
"The number of groups (`n_group`) must be an integer greater "
"than 0."
)
self.n_group = n_group
if (n_dim < 1):
raise ValueError(
"The dimensionality (`n_dim`) must be an integer "
"greater than 0."
)
self.n_dim = n_dim
# Handle initializer.
if embeddings_initializer is None:
if self.n_group == 1:
embeddings_initializer = tf.keras.initializers.Ones()
else:
scale = self.n_dim
alpha = np.ones((self.n_dim))
embeddings_initializer = pk_initializers.RandomAttention(
alpha, scale
)
self.embeddings_initializer = tf.keras.initializers.get(
embeddings_initializer
)
# Handle regularizer.
self.embeddings_regularizer = tf.keras.regularizers.get(
embeddings_regularizer
)
# Handle constraints.
if embeddings_constraint is None:
embeddings_constraint = pk_constraints.NonNegNorm(
scale=self.n_dim
)
self.embeddings_constraint = tf.keras.constraints.get(
embeddings_constraint
)
if fit_group is None:
if self.n_group == 1:
fit_group = False # TODO default should always be train
else:
fit_group = True
self.fit_group = fit_group
self.embeddings = self.add_weight(
shape=(self.n_group, self.n_dim),
initializer=self.embeddings_initializer,
trainable=fit_group, name='w', dtype=K.floatx(),
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint
)
self.mask_zero = False
def call(self, inputs):
"""Call.
Inflate weights by `group_id`.
Arguments:
inputs: A Tensor denoting `group_id`.
"""
output = tf.gather(self.embeddings, inputs)
# Add singleton dimension for sample_size.
output = tf.expand_dims(output, axis=0)
return output
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'n_group': int(self.n_group),
'n_dim': int(self.n_dim),
'fit_group': self.fit_group,
'embeddings_initializer':
tf.keras.initializers.serialize(self.embeddings_initializer),
'embeddings_regularizer':
tf.keras.regularizers.serialize(self.embeddings_regularizer),
'embeddings_constraint':
tf.keras.constraints.serialize(self.embeddings_constraint)
})
return config
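# Shape sketch for GroupAttention (illustrative): with n_group=2 and n_dim=3, calling the layer
# on a batch of integer group ids such as [0, 1, 1] gathers one weight row per trial and adds a
# singleton sample axis, giving an output of shape (1, 3, 3) = (sample, batch, n_dim).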
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='Kernel'
)
class Kernel(GroupLevel):
"""A basic population-wide kernel."""
def __init__(self, distance=None, similarity=None, **kwargs):
"""Initialize."""
super(Kernel, self).__init__(**kwargs)
if distance is None:
distance = WeightedMinkowski()
self.distance = distance
if similarity is None:
similarity = ExponentialSimilarity()
self.similarity = similarity
# Gather all pointers to theta-associated variables.
theta = self.distance.theta
theta.update(self.similarity.theta)
self.theta = theta
self._n_sample = ()
self._kl_weight = 0
@property
def n_sample(self):
return self._n_sample
@n_sample.setter
def n_sample(self, n_sample):
self._n_sample = n_sample
self.distance.n_sample = n_sample
self.similarity.n_sample = n_sample
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, kl_weight):
self._kl_weight = kl_weight
# Set kl_weight of constituent layers. # TODO MAYBE use `_layers`?
self.distance.kl_weight = kl_weight
self.similarity.kl_weight = kl_weight
def call(self, inputs):
"""Call.
Compute k(z_0, z_1), where `k` is the similarity kernel.
Note: Broadcasting rules are used to compute similarity between
`z_0` and `z_1`.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
"""
z_0 = inputs[0]
z_1 = inputs[1]
# group = inputs[-1][:, self.group_level]
# Create identity attention weights.
attention = tf.ones_like(z_0)
# Compute distance between query and references.
dist_qr = self.distance([z_0, z_1, attention])
# Compute similarity.
sim_qr = self.similarity(dist_qr)
return sim_qr
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'distance': tf.keras.utils.serialize_keras_object(self.distance),
'similarity': tf.keras.utils.serialize_keras_object(
self.similarity
),
})
return config
@classmethod
def from_config(cls, config):
"""Create from configuration."""
config['distance'] = tf.keras.layers.deserialize(config['distance'])
config['similarity'] = tf.keras.layers.deserialize(
config['similarity']
)
return cls(**config)
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='AttentionKernel'
)
class AttentionKernel(GroupLevel):
"""Attention kernel container."""
def __init__(
self, n_dim=None, attention=None, distance=None, similarity=None,
**kwargs):
"""Initialize.
Arguments:
n_dim: The dimensionality of the attention weights. This
should match the dimensionality of the embedding.
attention: A attention layer. If this is specified, the
argument `n_dim` is ignored.
distance: A distance layer.
similarity: A similarity layer.
"""
super(AttentionKernel, self).__init__(**kwargs)
if attention is None:
attention = GroupAttention(n_dim=n_dim, n_group=1)
self.attention = attention
if distance is None:
distance = WeightedMinkowski()
self.distance = distance
if similarity is None:
similarity = ExponentialSimilarity()
self.similarity = similarity
# Gather all pointers to theta-associated variables.
theta = self.distance.theta
theta.update(self.similarity.theta)
self.theta = theta
self._n_sample = ()
self._kl_weight = 0
def call(self, inputs):
"""Call.
Compute k(z_0, z_1), where `k` is the similarity kernel.
Note: Broadcasting rules are used to compute similarity between
`z_0` and `z_1`.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
group: A tf.Tensor denoting group assignments.
shape = (batch_size, k)
"""
z_0 = inputs[0]
z_1 = inputs[1]
group = inputs[-1]
# Expand attention weights.
attention = self.attention(group[:, self.group_level])
# Add singleton inner dimensions that are not related to sample_size,
# batch_size or vector dimensionality.
attention_shape = tf.shape(attention)
sample_size = tf.expand_dims(attention_shape[0], axis=0)
batch_size = tf.expand_dims(attention_shape[1], axis=0)
dim_size = tf.expand_dims(attention_shape[-1], axis=0)
n_expand = tf.rank(z_0) - tf.rank(attention)
shape_exp = tf.ones(n_expand, dtype=attention_shape[0].dtype)
shape_exp = tf.concat(
(sample_size, batch_size, shape_exp, dim_size), axis=0
)
attention = tf.reshape(attention, shape_exp)
# Compute distance between query and references.
dist_qr = self.distance([z_0, z_1, attention])
# Compute similarity.
sim_qr = self.similarity(dist_qr)
return sim_qr
# @property
# def n_dim(self):
# """Getter method for n_dim."""
# return self.attention.n_dim
@property
def n_sample(self):
return self._n_sample
@n_sample.setter
def n_sample(self, n_sample):
self._n_sample = n_sample
self.attention.n_sample = n_sample
self.distance.n_sample = n_sample
self.similarity.n_sample = n_sample
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, kl_weight):
self._kl_weight = kl_weight
# Set kl_weight of constituent layers. # TODO MAYBE use `_layers`?
self.attention.kl_weight = kl_weight
self.distance.kl_weight = kl_weight
self.similarity.kl_weight = kl_weight
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
# 'n_dim': int(self.n_dim),
'attention': tf.keras.utils.serialize_keras_object(self.attention),
'distance': tf.keras.utils.serialize_keras_object(self.distance),
'similarity': tf.keras.utils.serialize_keras_object(
self.similarity
),
})
return config
@classmethod
def from_config(cls, config):
"""Create from configuration."""
config['attention'] = tf.keras.layers.deserialize(config['attention'])
config['distance'] = tf.keras.layers.deserialize(config['distance'])
config['similarity'] = tf.keras.layers.deserialize(
config['similarity']
)
return cls(**config)
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='GroupAttentionVariational'
)
class GroupAttentionVariational(Variational):
"""Variational analog of group-specific attention weights."""
def __init__(self, **kwargs):
"""Initialize.
Arguments:
kwargs: Additional key-word arguments.
"""
super(GroupAttentionVariational, self).__init__(**kwargs)
def call(self, inputs):
"""Call.
Grab `group_id` only.
Arguments:
inputs: A Tensor denoting a trial's group membership.
"""
# Run forward pass through variational posterior layer.
outputs = self.posterior(inputs)
# Apply KL divergence between posterior and prior.
self.add_kl_loss(self.posterior.embeddings, self.prior.embeddings)
return outputs
@property
def n_group(self):
"""Getter method for `n_group`"""
# TODO need better decoupling, not all distributions will have loc.
return self.posterior.embeddings.distribution.loc.shape[0]
@property
    def n_dim(self):
        """Getter method for `n_dim`."""
# TODO need better decoupling, not all distributions will have loc.
return self.posterior.embeddings.distribution.loc.shape[1]
@property
def mask_zero(self):
"""Getter method for embeddings `mask_zero`."""
return self.posterior.mask_zero
@property
def embeddings(self):
"""Getter method for embeddings posterior mode."""
return self.posterior.embeddings
| 32.692825 | 79 | 0.613812 | 12,640 | 0.866882 | 0 | 0 | 13,052 | 0.895138 | 0 | 0 | 5,728 | 0.39284 |
7dd13c6ad4dc8afcb18c82aeecd32fc176c29e34 | 1,261 | py | Python | apps/user/migrations/0005_auto_20190804_1443.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
]
| 6 | 2019-07-18T16:21:17.000Z | 2020-11-19T04:47:02.000Z | apps/user/migrations/0005_auto_20190804_1443.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
]
| null | null | null | apps/user/migrations/0005_auto_20190804_1443.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
]
| null | null | null | # Generated by Django 2.1.7 on 2019-08-04 06:43
import datetime
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('user', '0004_auto_20190804_1438'),
]
operations = [
migrations.AlterField(
model_name='subjectinfo',
name='subject_id',
field=models.CharField(default=uuid.UUID('6c50ec1b-f1b5-426f-8365-7e1962074900'), editable=False, max_length=50, primary_key=True, serialize=False, verbose_name='科目ID'),
),
migrations.AlterField(
model_name='userprofile',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2019, 8, 4, 14, 43, 45, 491036), verbose_name='创建时间'),
),
migrations.AlterField(
model_name='userprofile',
name='update_time',
field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
),
migrations.AlterField(
model_name='userprofile',
name='user_id',
field=models.CharField(default=uuid.UUID('ea94d36f-ada5-4e0a-bfbf-e6df269b18de'), editable=False, max_length=50, primary_key=True, serialize=False, verbose_name='用户ID'),
),
]
| 35.027778 | 181 | 0.634417 | 1,164 | 0.905837 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.234241 |
7dd3bf8d2c2f8bbd741f59dd7b443601e8b83316 | 282 | py | Python | scripts/get_plat_name.py | uuosio/gscdk | 995d99948d7090032f76b05656cad29c6cfbb647 | [
"BSD-3-Clause"
]
| 6 | 2021-09-03T09:02:39.000Z | 2022-01-12T06:31:09.000Z | scripts/get_plat_name.py | learnforpractice/gscdk | dc17c43fa2be28500f38897a29bbbd9eb9c7ada7 | [
"BSD-3-Clause"
]
| 1 | 2021-11-01T16:46:09.000Z | 2021-11-04T12:51:45.000Z | scripts/get_plat_name.py | learnforpractice/gscdk | dc17c43fa2be28500f38897a29bbbd9eb9c7ada7 | [
"BSD-3-Clause"
]
| 2 | 2021-11-10T01:56:15.000Z | 2022-01-13T14:27:31.000Z | import platform
#check the platform for linux, macos, windows
if platform.system() == "Linux":
print("manylinux1_x86_64")
elif platform.system() == "Windows":
print("win-amd64")
elif platform.system() == "Darwin":
print("macosx_10_15_x86_64")
else:
print("Unknown")
| 25.636364 | 45 | 0.691489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.457447 |
7dd3f523efb7218a00299577b756498b0e6e336c | 508 | py | Python | submissions/mirror-reflection/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
]
| null | null | null | submissions/mirror-reflection/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
]
| 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/mirror-reflection/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
]
| null | null | null | # https://leetcode.com/problems/mirror-reflection
class Solution:
def mirrorReflection(self, p, q):
if q == 0:
return 0
i = 0
val = 0
while True:
val += q
i += 1
if (i % 2 == 0) and (val % p == 0):
return 2
elif (i % 2 == 1) and (val % (2 * p) == 0):
return 0
elif (i % 2 == 1) and (val % p == 0):
return 1
else:
continue
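# Worked example (from the LeetCode problem statement): p = 2, q = 1 -> after the first
# extension val = 1 (no receptor hit); after the second, val = 2 with i even and val % p == 0,
# so the method returns receptor 2.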
| 24.190476 | 55 | 0.36811 | 455 | 0.895669 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.096457 |
7dd470fef059403a7425a058aa8ed792b44ec169 | 4,290 | py | Python | sdk/python/kulado_azure/batch/get_account.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | sdk/python/kulado_azure/batch/get_account.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | sdk/python/kulado_azure/batch/get_account.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class GetAccountResult:
"""
A collection of values returned by getAccount.
"""
def __init__(__self__, account_endpoint=None, location=None, name=None, pool_allocation_mode=None, primary_access_key=None, resource_group_name=None, secondary_access_key=None, storage_account_id=None, tags=None, id=None):
if account_endpoint and not isinstance(account_endpoint, str):
raise TypeError("Expected argument 'account_endpoint' to be a str")
__self__.account_endpoint = account_endpoint
"""
The account endpoint used to interact with the Batch service.
"""
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
"""
The Azure Region in which this Batch account exists.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The Batch account name.
"""
if pool_allocation_mode and not isinstance(pool_allocation_mode, str):
raise TypeError("Expected argument 'pool_allocation_mode' to be a str")
__self__.pool_allocation_mode = pool_allocation_mode
"""
The pool allocation mode configured for this Batch account.
"""
if primary_access_key and not isinstance(primary_access_key, str):
raise TypeError("Expected argument 'primary_access_key' to be a str")
__self__.primary_access_key = primary_access_key
"""
The Batch account primary access key.
"""
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if secondary_access_key and not isinstance(secondary_access_key, str):
raise TypeError("Expected argument 'secondary_access_key' to be a str")
__self__.secondary_access_key = secondary_access_key
"""
The Batch account secondary access key.
"""
if storage_account_id and not isinstance(storage_account_id, str):
raise TypeError("Expected argument 'storage_account_id' to be a str")
__self__.storage_account_id = storage_account_id
"""
The ID of the Storage Account used for this Batch account.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
A map of tags assigned to the Batch account.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_account(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Batch Account.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/d/batch_account.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__ret__ = await kulado.runtime.invoke('azure:batch/getAccount:getAccount', __args__, opts=opts)
return GetAccountResult(
account_endpoint=__ret__.get('accountEndpoint'),
location=__ret__.get('location'),
name=__ret__.get('name'),
pool_allocation_mode=__ret__.get('poolAllocationMode'),
primary_access_key=__ret__.get('primaryAccessKey'),
resource_group_name=__ret__.get('resourceGroupName'),
secondary_access_key=__ret__.get('secondaryAccessKey'),
storage_account_id=__ret__.get('storageAccountId'),
tags=__ret__.get('tags'),
id=__ret__.get('id'))
| 44.226804 | 226 | 0.675991 | 2,938 | 0.684848 | 0 | 0 | 0 | 0 | 1,066 | 0.248485 | 1,813 | 0.422611 |
7dd4c10b342878f52f717eef146ce0ddd5328f2c | 1,988 | py | Python | run/run_fd_tgv_conv.py | huppd/PINTimpact | 766b2ef4d2fa9e6727965e48a3fba7b752074850 | [
"MIT"
]
| null | null | null | run/run_fd_tgv_conv.py | huppd/PINTimpact | 766b2ef4d2fa9e6727965e48a3fba7b752074850 | [
"MIT"
]
| null | null | null | run/run_fd_tgv_conv.py | huppd/PINTimpact | 766b2ef4d2fa9e6727965e48a3fba7b752074850 | [
"MIT"
]
| null | null | null | """ running a convergence study for finite differences and the Taylor-Green vortex """
import os
from math import pi
import xml.etree.ElementTree as ET
import platform_paths as pp
import manipulator as ma
# load parameter file
ma.set_ids('../XML/parameterTGVTime.xml')
TREE = ET.parse('../XML/parameterTGVTime.xml')
ROOT = TREE.getroot()
ma.set_parameter(ROOT, 'withoutput', 1)
ma.set_parameter(ROOT, 'initial guess', 'zero')
# ma.set_parameter( ROOT, 'refinement level', 1 )
# make executable ready
EXE = 'peri_navier3DTime'
os.chdir(pp.EXE_PATH)
os.system('make '+EXE+' -j4')
CASE_PATH = ['']*4
RUNS = range(1)
RES = [10]
STS = [0.1, 10., 1.]
NFS = [72]
ma.set_parameter(ROOT, 'nx', 65)
ma.set_parameter(ROOT, 'ny', 65)
ma.set_parameter(ROOT, 'nz', 5)
CASE_PATH[0] = pp.DATA_PATH + '/FDTGV_conv2'
pp.mkdir(CASE_PATH, 0)
for re in RES:
CASE_PATH[1] = '/re_'+str(re)
pp.mkdir(CASE_PATH, 1)
for st in STS:
CASE_PATH[2] = '/a2_'+str(st)
pp.mkdir(CASE_PATH, 2)
for nf in NFS:
CASE_PATH[3] = '/nt_'+str(nf)
pp.mkdir(CASE_PATH, 3)
#
pp.chdir(CASE_PATH, 3)
#
ma.set_parameter(ROOT, 'Re', re)
ma.set_parameter(ROOT, 'alpha2', 2.*pi*st*re)
ma.set_parameter(ROOT, 'nf', nf)
ma.set_parameter(ROOT, 'npx', 1)
ma.set_parameter(ROOT, 'npy', 1)
ma.set_parameter(ROOT, 'npz', 1)
ma.set_parameter(ROOT, 'npf', 12)
TREE.write('parameter3D.xml')
# nptot = npx[i]*npy[i]*npf[i]
nptot = 12
mem = int(max(1024, 60*1024/nptot))
for run in RUNS:
print()
print(CASE_PATH)
exeString = \
pp.exe_pre(nptot, ' -N -R "rusage[mem=' +
str(mem) + ']" -W 6:00', run) + \
pp.EXE_PATH+'/'+EXE
print(exeString)
os.system(exeString)
| 27.611111 | 74 | 0.551308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.230382 |
7dd56ee0d12643635fe1de9999d5c50d9f66ca84 | 14,617 | py | Python | DCSCN.py | dattv/DCSCN-Tensorflow | eaed09c1d39236617f970b16f555ae88cfa49280 | [
"MIT"
]
| 3 | 2019-06-18T13:04:26.000Z | 2019-06-25T07:59:10.000Z | DCSCN.py | dattv/DCSCN-Tensorflow | eaed09c1d39236617f970b16f555ae88cfa49280 | [
"MIT"
]
| null | null | null | DCSCN.py | dattv/DCSCN-Tensorflow | eaed09c1d39236617f970b16f555ae88cfa49280 | [
"MIT"
]
| null | null | null | """
"""
import logging
import os
import random
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from helper import loader, utility as util
matplotlib.use("agg")
INPUT_IMAGE_DIR = "input"
INTERPOLATED_IMAGE_DIR = "interpolated"
TRUE_IMAGE_DIR = "true"
class SuperResolution:
def __init__(self, flags, model_name=""):
# model parameters
self.filters = flags.filters # number of filters in first feature extraction CNNs
self.min_filters = flags.min_filters # number of filters in last feature extraction CNNs
self.nin_filters = flags.nin_filters # Number of CNNs filters in A1 at reconstruction network
self.nin_filters2 = flags.nin_filters2 if flags.nin_filters2 != 0 else flags.nin_filters //2
        # Number of CNNs filters in B1 and B2 at reconstruction network
self.cnn_size = flags.cnn_size # size of CNNs features
self.last_cnn_size = flags.last_cnn_size# Size of Last CNN filters
self.cnn_stride = 1
self.layers = flags.layers # Number of layers of CNNs
self.nin = flags.nin # Use Network In Network
self.bicubic_init = flags.bicubic_init # make bicubic interpolation values as initial input of x2
        self.dropout = flags.dropout # Dropout keep rate. Dropout is disabled when it is 1.0.
self.activator = flags.activator # Activator. can be [relu, leaky_relu, prelu, sigmoid, tanh]
self.filters_decay_gamma = flags.filters_decay_gamma
# Gamma
# Training parameters
self.initializer = flags.initializer # Initializer for weights can be [uniform, stddev, xavier, he, identity, zero]
self.weight_dev = flags.weight_dev # Initial weight stddev (won't be used when you use he or xavier initializer)
self.l2_decay = flags.l2_decay # l2_decay
self.optimizer = flags.optimizer # Optimizer can be [gd, momentum, adadelta, adagrad, adam, rmsprop]
self.beta1 = flags.beta1 # Beta1 for adam optimizer
self.beta2 = flags.beta2 # Beta2 of adam optimizer
self.momentum = flags.momentum # Momentum for momentum optimizer and rmsprop optimizer
self.batch_num = flags.batch_num # Number of mini-batch images for training
        self.batch_image_size = flags.image_size # Image size for mini-batch
if flags.stride_size == 0:
self.stride_size = flags.batch_image_size // 2
else:
self.stride_size = flags.stride_size
# Learning rate control for training
self.initial_lr = flags.initial_lr # Initial learning rate
        self.lr_decay = flags.lr_decay # Learning rate decay factor applied when the loss has not decreased for a given number of epochs
self.lr_decay_epoch = flags.lr_decay_epoch
# Decay learning rate when loss does not decrease (5)
# Dataset or Others
self.dataset = flags.dataset # Training dataset dir. [yang91, general100, bsd200]
self.test_dataset = flags.test_dataset # Directory of Test dataset [set5, set14, bsd100, urban100]
# Image Processing Parameters
self.scale = flags.scale # Scale factor for Super Resolution (can be 2 or more)
self.max_value = flags.max_value # For normalize image pixel value
        self.channels = flags.chanels # Number of image channels used. Use only Y of YCbCr when channels=1.
self.jpeg_mode = flags.jpeg_model # Turn on or off jpeg mode when converting from rgb to ycbcr
self.output_channels = self.scale * self.scale
#
# Environment
self.checkpoint_dir = flags.checkpoint_dir
# Directory for checkpoints
self.tf_log_dir = flags.tf_log_dir # Directory for tensorboard log
        # Debugging or Logging
self.debug = flags.debug # Display each calculated MSE and weight variables
self.save_loss = flags.save_loss # Save loss
self.save_weights = flags.save_weights # Save weights and biases
self.save_images = flags.save_images # Save CNN weights as images
self.save_images_num = flags.save_images_num
# Number of CNN images saved
self.log_weight_image_num = 32
# initialize variables
self.name = self.get_model_name(model_name)
self.batch_input = self.batch_num * [None]
self.batch_input_quad = np.zeros(
shape=[self.batch_num, self.batch_image_size, self.batch_image_size, self.scale * self.scale]
)
self.batch_true_quad = np.zeros(
shape=[self.batch_num, self.batch_image_size, self.batch_image_size, self.scale * self.scale]
)
self.receptive_fields = 2 * self.layers + self.cnn_size - 2
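        # e.g. (illustrative) layers=12 and cnn_size=3 give a receptive field of 2*12 + 3 - 2 = 25 pixels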
self.complexity = 0
# initialize environment
util.make_dir(self.checkpoint_dir)
util.make_dir(flags.graph_dir)
util.make_dir(self.tf_log_dir)
if flags.initialise_tf_log:
util.clean_dir(self.tf_log_dir)
util.set_logging(flags.log_filename, stream_log_level=logging.INFO, file_log_level=logging.INFO,
tf_log_level=tf.logging.WARN)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.InteractiveSession(config=config)
self.init_train_step()
logging.info("\nDCSCN -------------------------------")
logging.info("{} [{}]".format(util.get_now_date(), self.name))
def get_model_name(self, model_name):
        if model_name == "":
name = "dcscn_L{:d}_F{:d}".format(self.layers, self.filters)
            if self.min_filters != 0:
                name += "to{:d}".format(self.min_filters)
if self.filters_decay_gamma != 1.0:
name += "_G{:2.2f}".format(self.filters_decay_gamma)
if self.cnn_size != 3:
name += "_C{:d}".format(self.cnn_size)
if self.scale != 2:
name += "_S{:d}".format(self.scale)
if self.nin:
name += "_NIN"
if self.nin_filters != 0:
name += "_A{:d}".format(self.nin_filters)
if self.nin_filters2 != self.nin_filters2 // 2:
name += "_B{:d}".format(self.nin_filters2)
if self.bicubic_init:
name += "_BI"
if self.dropout != 1.0:
name += "_D{:0.2f}".format(self.dropout)
if self.max_value != 255.0:
name += "_M{:2.1f}".format(self.max_value)
if self.activator != "relu":
name += "_{}".format(self.activator)
if self.dataset != "yang91":
name += "_" + self.dataset
if self.batch_image_size != 32:
name += "_B{:d}".format(self.batch_image_size)
if self.last_cnn_size != 1:
name += "_L{:d}".format(self.last_cnn_size)
else:
name = "dcscn_{}".format(model_name)
return name
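    # Example (illustrative): layers=8, filters=96, min_filters=48, with every other option left
    # at a value that skips the remaining branches, yields the name "dcscn_L8_F96to48".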
def load_datasets(self, target, data_dir, batch_dir, batch_image_size, stride_size=0):
print("Loading datasets for [%s]..." % target)
util.make_dir(batch_dir)
if stride_size == 0:
stride_size = batch_image_size // 2
if self.bicubic_init:
resampling_method = "bicubic"
else:
resampling_method = "nearest"
datasets = loader.DataSets(self.scale, batch_image_size, stride_size, channels=self.channels,
jpeg_mode=self.jpeg_mode, max_value=self.max_value,
resampling_method=resampling_method)
if not datasets.is_batch_exist(batch_dir):
datasets.build_batch(data_dir, batch_dir)
if target == "training":
datasets.load_batch_train(batch_dir)
self.train = datasets
else:
datasets.load_batch_test(batch_dir)
self.test = datasets
def init_epoch_index(self):
self.batch_index = random.sample(range(0, self.train.input.count), self.train.input.count)
self.index_in_epoch = 0
self.training_psnr_sum = 0
self.training_step = 0
def build_input_batch(self):
for i in range(self.batch_num):
self.batch_input[i], self.batch_input_bicubic[i], self.batch_true[i] = self.train.load_batch_image(
self.max_value)
def conv2d(self, x, w, stride, bias=None, activator=None, leaky_relu_alpha=0.1, name=""):
        conv = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding="SAME", name=name + "_conv")
self.complexity += int(w.shape[0] * w.shape[1] * w.shape[2] * w.shape[3])
if bias is not None:
conv = tf.add(conv, bias, name=name + "_add")
self.complexity += int(bias.shape[0])
if activator is not None:
if activator == "relu":
conv = tf.nn.relu(conv, name=name + "_relu")
elif activator == "sigmoid":
conv = tf.nn.sigmoid(conv, name=name + "_sigmoid")
elif activator == "tanh":
conv = tf.nn.tanh(conv, name=name + "_tanh")
elif activator == "leaky_relu":
conv = tf.maximum(conv, leaky_relu_alpha * conv, name=name + "_leaky")
elif activator == "prelu":
with tf.variable_scope("prelu"):
alphas = tf.Variable(tf.constant(0.1, shape=[w.get_shape()[3]]), name=name + "_prelu")
if self.save_weights:
util.add_summaries("prelu_alpha", self.name, alphas, save_stddev=False, save_mean=False)
conv = tf.nn.relu(conv) + tf.multiply(alphas, (conv - tf.abs(conv))) * 0.5
else:
raise NameError('Not implemented activator:%s' % activator)
self.complexity += int(bias.shape[0])
return conv
def build_conv_and_bias(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num,
use_activation=True, use_dropout=True):
with tf.variable_scope(name):
w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
b = util.bias([output_feature_num], name="conv_B")
h = self.conv2d(input_tensor, w, self.cnn_stride, bias=b,
activator=self.activator if use_activation else None,
name=name)
if use_dropout and self.dropout != 1.e0:
h = tf.nn.dropout(h, self.dropout_input, name="dropout")
if self.save_weights:
util.add_summaries("weight", self.name, w, save_stddev=True, save_mean=True)
util.add_summaries("bias", self.name, b, save_stddev=True, save_mean=True)
if self.save_images and cnn_size > 1 and input_feature_num == 1:
weight_transposed = tf.transpose(w, [3, 0, 1, 2])
with tf.name_scope("image"):
tf.summary.image(self.name, weight_transposed, max_outputs=self.log_weight_image_num)
return w, b, h
def build_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num):
with tf.variable_scope(name):
w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
h = self.conv2d(input_tensor, w, self.cnn_stride, bias=None, activator=None, name=name)
if self.save_weights:
util.add_summaries("weight", self.name, w, save_stddev=True, save_mean=True)
if self.save_images and cnn_size > 1 and input_feature_num == 1:
weight_transposed = tf.transpose(w, [3, 0, 1, 2])
with tf.name_scope("image"):
tf.summary.image(self.name, weight_transposed, max_outputs=self.log_weight_image_num)
return w, h
def build_input_batch(self, batch_dir):
for i in range(self.batch_num):
if self.index_in_epoch >= self.train.input.count:
self.init_epoch_index()
self.epochs_completed += 1
image_no = self.batch_index[self.index_in_epoch]
self.batch_input[i] = util.load_image(batch_dir + "/" + INPUT_IMAGE_DIR + "/%06d.bmp" % image_no,
print_console=False)
batch_input_quad = util.load_image(batch_dir + "/" + INTERPOLATED_IMAGE_DIR + "/%06d.bmp" % image_no,
print_console=False)
loader.convert_to_multi_channel_image(self.batch_input_quad[i], batch_input_quad, self.scale)
batch_true_quad = util.load_image(batch_dir + "/" + TRUE_IMAGE_DIR + "/%06d.bmp" % image_no,
print_console=False)
loader.convert_to_multi_channel_image(self.batch_true_quad[i], batch_true_quad, self.scale)
self.index_in_epoch += 1
def init_train_step(self):
self.lr = self.initial_lr
self.csv_epochs = []
self.csv_psnr = []
self.csv_training_psnr = []
self.epochs_completed = 0
self.min_validation_mse = -1
self.min_validation_epoch = -1
self.step = 0
self.start_time = time.time()
def end_train_step(self):
self.total_time = time.time() - self.start_time
def print_steps_completed(self, output_to_logging=False):
if self.step == 0:
return
processing_time = self.total_time / self.step
h = self.total_time // (60 * 60)
m = (self.total_time - h * 60 * 60) // 60
s = (self.total_time - h * 60 * 60 - m * 60)
status = "Finished at Total Epoch:%d Steps:%s Time:%02d:%02d:%02d (%2.3fsec/step)" % (
self.epochs_completed, "{:,}".format(self.step), h, m, s, processing_time)
if output_to_logging:
logging.info(status)
else:
print(status)
| 46.256329 | 126 | 0.59205 | 14,301 | 0.978381 | 0 | 0 | 0 | 0 | 0 | 0 | 2,429 | 0.166176 |
7dd643437e0865cafce1491b350b4e99be342f2c | 27 | py | Python | tests/tests.py | cjapp/tkinter_simpleEncodeDecode | 15520d73c51bb1a6a316414b2e8fb50b7be8f942 | [
"MIT"
]
| null | null | null | tests/tests.py | cjapp/tkinter_simpleEncodeDecode | 15520d73c51bb1a6a316414b2e8fb50b7be8f942 | [
"MIT"
]
| null | null | null | tests/tests.py | cjapp/tkinter_simpleEncodeDecode | 15520d73c51bb1a6a316414b2e8fb50b7be8f942 | [
"MIT"
]
| null | null | null |
from .context import main
| 9 | 25 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7dd7abdb00a4ee3724c7dfc992569e2f8f38d9dd | 23,149 | py | Python | ofa/tutorial/imagenet_eval_helper.py | johsnows/once-for-all | fac2a6388e70873666b848a316aa58c7b2e17031 | [
"Apache-2.0"
]
| null | null | null | ofa/tutorial/imagenet_eval_helper.py | johsnows/once-for-all | fac2a6388e70873666b848a316aa58c7b2e17031 | [
"Apache-2.0"
]
| null | null | null | ofa/tutorial/imagenet_eval_helper.py | johsnows/once-for-all | fac2a6388e70873666b848a316aa58c7b2e17031 | [
"Apache-2.0"
]
| null | null | null | import os.path as osp
import numpy as np
import math
from tqdm import tqdm
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from torchvision import transforms, datasets
from ofa.utils import AverageMeter, accuracy
from ofa.model_zoo import ofa_specialized
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
import copy
import random
def evaluate_ofa_resnet_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['w']) == 6 and len(net_config['e']) == 18 and len(net_config['d']) == 5
ofa_net.set_active_subnet(w=net_config['w'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, 224, batch_size)
top1 = validate(subnet, path, 224, data_loader, batch_size, device)
return top1
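# Example net_config for the ResNet-style OFA network (illustrative values; the valid choices
# depend on the OFAResNets search space): 'w' holds 6 width-multiplier choices, 'e' 18 expand
# ratios and 'd' 5 stage depths, e.g.
#   net_config = {'w': [2] * 6, 'e': [0.35] * 18, 'd': [2] * 5}
#   top1 = evaluate_ofa_resnet_subnet(ofa_net, imagenet_path, net_config, data_loader, batch_size=100)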
def evaluate_ofa_resnet_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['w']) == 6 and len(net_config1['e']) == 18 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(w=net_config1['w'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, 224, batch_size)
ofa_net.set_active_subnet(w=net_config2['w'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, 224, batch_size)
# assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['ks']) == 20 and len(net_config['e']) == 20 and len(net_config['d']) == 5
ofa_net.set_active_subnet(ks=net_config['ks'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, net_config['r'][0], batch_size)
top1 = validate(subnet, path, net_config['r'][0], data_loader, batch_size, device)
return top1
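# Example net_config for the MobileNetV3-style OFA network (illustrative values): 'ks' holds 20
# per-block kernel sizes, 'e' 20 expand ratios, 'd' 5 stage depths and 'r' the input resolution, e.g.
#   net_config = {'ks': [3] * 20, 'e': [6] * 20, 'd': [4] * 5, 'r': [224]}
#   top1 = evaluate_ofa_subnet(ofa_net, imagenet_path, net_config, data_loader, batch_size=100)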
def evaluate_ofa_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['ks']) == 20 and len(net_config1['e']) == 20 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(ks=net_config1['ks'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, net_config1['r'][0], batch_size)
ofa_net.set_active_subnet(ks=net_config2['ks'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, net_config2['r'][0], batch_size)
assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, net_config2['r'][0], data_loader, batch_size, device)
return top1
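# Note: the two configs are expected to share the same input resolution
# (net_config1['r'][0] == net_config2['r'][0]); ensemble_validate() below averages the two
# subnets' logits before computing the loss and the top-1/top-5 accuracy.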
def calib_bn(net, path, image_size, batch_size, num_images=2000):
# print('Creating dataloader for resetting BN running statistics...')
dataset = datasets.ImageFolder(
osp.join(
path,
'train'),
transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
transforms.ToTensor(),
transforms.Normalize(
mean=[
0.485,
0.456,
0.406],
std=[
0.229,
0.224,
0.225]
),
])
)
chosen_indexes = np.random.choice(list(range(len(dataset))), num_images)
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sub_sampler,
batch_size=batch_size,
num_workers=16,
pin_memory=True,
drop_last=False,
)
# print('Resetting BN running statistics (this may take 10-20 seconds)...')
set_running_statistics(net, data_loader)
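# Usage sketch (illustrative path): calib_bn(subnet, '/path/to/imagenet', 224, batch_size=100)
# re-estimates the BatchNorm running statistics from num_images random training crops before
# the subnet is validated.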
def ensemble_validate(nets, path, image_size, data_loader, batch_size=100, device='cuda:0'):
    if 'cuda' in device:
        print('use cuda')
        nets = [torch.nn.DataParallel(net).to(device) for net in nets]
    else:
        nets = [net.to(device) for net in nets]
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
for net in nets:
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
n = len(nets)
output = 0
                for net_idx, net in enumerate(nets):
                    if net_idx == 0:
                        output = net(images)
                    else:
                        output += net(images)
output = output/n
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.3f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def validate(net, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
net = torch.nn.DataParallel(net).to(device)
else:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
output = net(images)
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.1f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def evaluate_ofa_specialized(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
def select_platform_name():
valid_platform_name = [
'pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops'
]
print("Please select a hardware platform from ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
while True:
platform_name = input()
platform_name = platform_name.lower()
if platform_name in valid_platform_name:
return platform_name
print("Platform name is invalid! Please select in ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
def select_netid(platform_name):
platform_efficiency_map = {
'pixel1': {
143: 'pixel1_lat@[email protected]_finetune@75',
132: 'pixel1_lat@[email protected]_finetune@75',
79: 'pixel1_lat@[email protected]_finetune@75',
58: 'pixel1_lat@[email protected]_finetune@75',
40: 'pixel1_lat@[email protected]_finetune@25',
28: 'pixel1_lat@[email protected]_finetune@25',
20: 'pixel1_lat@[email protected]_finetune@25',
},
'pixel2': {
62: 'pixel2_lat@[email protected]_finetune@25',
50: 'pixel2_lat@[email protected]_finetune@25',
35: 'pixel2_lat@[email protected]_finetune@25',
25: 'pixel2_lat@[email protected]_finetune@25',
},
'note10': {
64: 'note10_lat@[email protected]_finetune@75',
50: 'note10_lat@[email protected]_finetune@75',
41: 'note10_lat@[email protected]_finetune@75',
30: 'note10_lat@[email protected]_finetune@75',
22: 'note10_lat@[email protected]_finetune@25',
16: 'note10_lat@[email protected]_finetune@25',
11: 'note10_lat@[email protected]_finetune@25',
8: 'note10_lat@[email protected]_finetune@25',
},
'note8': {
65: 'note8_lat@[email protected]_finetune@25',
49: 'note8_lat@[email protected]_finetune@25',
31: 'note8_lat@[email protected]_finetune@25',
22: 'note8_lat@[email protected]_finetune@25',
},
's7edge': {
88: 's7edge_lat@[email protected]_finetune@25',
58: 's7edge_lat@[email protected]_finetune@25',
41: 's7edge_lat@[email protected]_finetune@25',
29: 's7edge_lat@[email protected]_finetune@25',
},
'lg-g8': {
24: 'LG-G8_lat@[email protected]_finetune@25',
16: 'LG-G8_lat@[email protected]_finetune@25',
11: 'LG-G8_lat@[email protected]_finetune@25',
8: 'LG-G8_lat@[email protected]_finetune@25',
},
'1080ti': {
27: '1080ti_gpu64@[email protected]_finetune@25',
22: '1080ti_gpu64@[email protected]_finetune@25',
15: '1080ti_gpu64@[email protected]_finetune@25',
12: '1080ti_gpu64@[email protected]_finetune@25',
},
'v100': {
11: 'v100_gpu64@[email protected]_finetune@25',
9: 'v100_gpu64@[email protected]_finetune@25',
6: 'v100_gpu64@[email protected]_finetune@25',
5: 'v100_gpu64@[email protected]_finetune@25',
},
'tx2': {
96: 'tx2_gpu16@[email protected]_finetune@25',
80: 'tx2_gpu16@[email protected]_finetune@25',
47: 'tx2_gpu16@[email protected]_finetune@25',
35: 'tx2_gpu16@[email protected]_finetune@25',
},
'cpu': {
17: 'cpu_lat@[email protected]_finetune@25',
15: 'cpu_lat@[email protected]_finetune@25',
11: 'cpu_lat@[email protected]_finetune@25',
10: 'cpu_lat@[email protected]_finetune@25',
},
'flops': {
595: 'flops@[email protected]_finetune@75',
482: 'flops@[email protected]_finetune@75',
389: 'flops@[email protected]_finetune@75',
}
}
sub_efficiency_map = platform_efficiency_map[platform_name]
if not platform_name == 'flops':
print("Now, please specify a latency constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'ms. (Please just input the number.) \n')
else:
print("Now, please specify a FLOPs constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'MFLOPs. (Please just input the number.) \n')
while True:
efficiency_constraint = input()
if not efficiency_constraint.isdigit():
print('Sorry, please input an integer! \n')
continue
efficiency_constraint = int(efficiency_constraint)
if not efficiency_constraint in sub_efficiency_map.keys():
print('Sorry, please choose a value from: ', sorted(list(sub_efficiency_map.keys())), '.\n')
continue
return sub_efficiency_map[efficiency_constraint]
if not ensemble:
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
validate(net, path, image_size, data_loader, batch_size, device)
else:
nets = []
for i in range(2):
print('{}model'.format(i))
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
nets.append(net)
ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
return net_id
net_id = ['pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'flops@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', ]
def evaluate_ofa_space(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
    net_acc = []
    for i, id in enumerate(net_id):
        acc = ""
        for j in range(2, len(id)):
            if id[j] == '.':
                acc = id[j - 2] + id[j - 1] + id[j] + id[j + 1]
        net_acc.append(acc)
    id = np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
for i in range(1, n):
for j in range(i):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return net_id[best_team[0]], net_id[best_team[1]]
def evaluate_ofa_best_acc_team(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
    net_acc = []
    for i, id in enumerate(net_id):
        acc = ""
        for j in range(2, len(id)):
            if id[j] == '.':
                acc = id[j - 2] + id[j - 1] + id[j] + id[j + 1]
        net_acc.append(acc)
    id = np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
i = n-1
for j in range(18, n):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
def evaluate_ofa_random_sample(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
    net_acc = []
    for i, id in enumerate(net_id):
        acc = ""
        for j in range(2, len(id)):
            if id[j] == '.':
                acc = id[j - 2] + id[j - 1] + id[j] + id[j + 1]
        net_acc.append(acc)
    id = np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
acc_list = []
space = []
best_team =[]
for k in range(20):
nets = []
team = []
i = random.randint(0, n-1)
j = (i + random.randint(1, n-1)) % n
print('i:{} j:{}'.format(i, j))
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
acc_list.append(acc)
if acc>best_acc:
best_acc=acc
best_team = team
avg_acc = np.mean(acc_list)
std_acc = np.std(acc_list, ddof=1)
var_acc = np.var(acc_list)
print("avg{} var{} std{}".format(avg_acc, std_acc, var_acc))
print('best_random_team best_acc{}'.format(best_team, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
sort_net_id=['tx2_gpu16@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
             'cpu_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
             'tx2_gpu16@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', '1080ti_gpu 64@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75']
| 44.093333 | 177 | 0.607154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,329 | 0.316601 |
7dd999fb131d09d1bf5880249af5cf7d95c80d95 | 8,713 | py | Python | python/housinginsights/sources/cama.py | mrkem598/housing-insights | 05dffebad690bf727cbcbec53128d2fb69166e4c | [
"MIT"
]
| null | null | null | python/housinginsights/sources/cama.py | mrkem598/housing-insights | 05dffebad690bf727cbcbec53128d2fb69166e4c | [
"MIT"
]
| null | null | null | python/housinginsights/sources/cama.py | mrkem598/housing-insights | 05dffebad690bf727cbcbec53128d2fb69166e4c | [
"MIT"
]
| null | null | null | # Script is deprecated, as of September 18, 2017.
# zoneUnitCount now calculated with LoadData's _get_residential_units()
#
from pprint import pprint
import os
import sys
import requests
from collections import OrderedDict
import csv
import datetime
PYTHON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.append(PYTHON_PATH)
from housinginsights.sources.base import BaseApiConn
from housinginsights.tools.logger import HILogger
logger = HILogger(name=__file__, logfile="sources.log")
class MarApiConn_2(BaseApiConn):
"""
API Interface to the Master Address Record (MAR) database.
Use public methods to retrieve data.
"""
BASEURL = 'http://citizenatlas.dc.gov/newwebservices/locationverifier.asmx'
def __init__(self, baseurl=None,proxies=None,database_choice=None, debug=False):
super().__init__(MarApiConn_2.BASEURL)
def get_data(self, square, lot, suffix):
"""
Get information on a location based on a simple query string.
:param square: SSL first part
:type location: String.
:param lot: SSL second part
:type location: String.
:param output_type: Output type specified by user.
:type output_type: String.
:param output_file: Output file specified by user.
:type output_file: String
:returns: Json output from the api.
:rtype: String
"""
params = {
'f': 'json',
'Square': square,
'Lot': lot,
'Suffix': suffix
}
result = self.get('/findAddFromSSL2', params=params)
if result.status_code != 200:
err = "An error occurred during request: status {0}"
logger.exception(err.format(result.status_code))
raise
mar_data = result.json()
if mar_data['returnDataset'] == {}:
mar_returns = {'Warning': 'No MAR data availble - property under construction - see AYB year'}
else:
entry = mar_data['returnDataset']['Table1'][0]
mar_returns = {'anc': entry['ANC'],
'census_tract': entry['CENSUS_TRACT'],
'neighborhood_cluster': entry['CLUSTER_'],
'ward': entry['WARD'],
'zip': entry['ZIPCODE']
}
return mar_returns
class CamaApiConn(BaseApiConn):
"""
API Interface to the Computer Assisted Mass Appraisal - Residential (CAMA)
API, to obtain SSL numbers to use as input for the MarApiConn_2 and get
the corresponding housing and bedroom units.
"""
BASEURL = 'https://opendata.arcgis.com/datasets'
def __init__(self):
super().__init__(CamaApiConn.BASEURL)
def get_data(self):
"""
Grabs data from CAMA. Individual CAMA property retrieves zone_type data
from MAR api. Count number of housing units and bedroom units per zone.
Return the count data (in dictionary form) to be processed into csv
by get_csv() method.
"""
logger.info("Starting CAMA")
mar_api = MarApiConn_2()
result = self.get(urlpath='/c5fb3fbe4c694a59a6eef7bf5f8bc49a_25.geojson', params=None)
if result.status_code != 200:
err = "An error occurred during request: status {0}"
raise Exception(err.format(result.status_code))
cama_data = result.json()
logger.info(" Got cama_data. Length:{}".format(len(cama_data['features'])))
"""
Example of: anc_count = [OrderedDict([('zone_type', 'anc'), ('zone', 'ANC 2B'),
('housing_unit_count', 10), ('bedroom_unit_count', 10)], etc)]
"""
zone_types = ['anc', 'census_tract', 'neighborhood_cluster', 'ward', 'zip']
anc_count = []
census_count = []
cluster_count = []
ward_count = []
zipcode_count = []
"""
Take each CAMA property data and retrieve the MAR data.
"""
"""
Certain square values have four digits + a letter. (ex. 8888E)
Square would be the first four digits and suffix would be the letter.
SSL sometimes comes as 8 digit string without spacing in the middle.
"""
"""
CAMA data includes bldgs under construction. CAMA's data includes AYB of 2018
as of June 2017. We eliminate all data points that are under construction and
don't provide any housing units and bedrm at this time.
"""
for index, row in enumerate(cama_data['features']):
if (index % 1000 == 0):
print(" currently at index {}".format(index))
try:
current_year = int(datetime.date.today().strftime('%Y'))
#Skipping none values for units under construction
if row['properties']['AYB'] is not None and int(row['properties']['AYB']) > current_year:
continue
objectid = row['properties']['OBJECTID']
if len(row['properties']['SSL']) == 8:
square = row['properties']['SSL'][:4]
lot = row['properties']['SSL'][4:]
else:
square, lot = row['properties']['SSL'].split()
suffix = ' '
                if len(square) > 4:
                    # take the trailing letter as the suffix before truncating
                    suffix = square[-1]
                    square = square[:4]
mar_return = mar_api.get_data(square, lot, suffix)
''' Count the housing units and bedrooms '''
num_units = 0
                if row['properties']['NUM_UNITS']:
                    num_units = row['properties']['NUM_UNITS']
                if num_units == 0:
                    num_units = 1
                bedrm = row['properties']['BEDRM']
                if bedrm == 0:
                    bedrm = 1
                if bedrm is None:
                    bedrm = 0
for zone in zone_types:
if zone == 'anc': zone_count = anc_count
elif zone == 'census_tract': zone_count = census_count
elif zone == 'neighborhood_cluster': zone_count = cluster_count
elif zone == 'ward': zone_count = ward_count
elif zone == 'zip': zone_count = zipcode_count
if 'Warning' not in mar_return.keys():
flag = False
for dictionary in zone_count: #dictionary is {'zone_type': 'ANC', 'zone': 'ANC 8A', etc.}
if dictionary['zone'] == mar_return[zone]: #mar_return[ANC] is 'ANC 8A'
dictionary['housing_unit_count'] += num_units
dictionary['bedroom_unit_count'] += bedrm
flag = True
break
if not flag:
zone_count.append( OrderedDict([('zone_type', zone), ('zone', mar_return[zone]), ('housing_unit_count', num_units), ('bedroom_unit_count', bedrm)]) )
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, "line", exc_tb.tb_lineno)
print("Error! SSL: ", row['properties']['SSL'], row['properties']['AYB'])
continue
return {'anc': anc_count, 'census_tract': census_count, 'neighborhood_cluster': cluster_count, 'ward': ward_count, 'zip': zipcode_count}
def get_csv(self):
"""
Takes the returned dictionary from get_data() and convert the information
into csv file and then save the csv file in
housing-insights/data/processed/zoneUnitCount
as zoneUnitCount_2017-05-30.csv.
"""
if not os.path.exists('../../../data/processed/zoneUnitCount'):
os.makedirs('../../../data/processed/zoneUnitCount')
data_processed_zoneUnitCount = os.path.join(PYTHON_PATH, os.pardir, 'data', 'processed', 'zoneUnitCount')
zone_data = self.get_data()
toCSV = []
date = datetime.date.today().strftime('%Y-%m-%d')
filename = os.path.join(data_processed_zoneUnitCount, 'zoneUnitCount_'+date+'.csv')
for key, value in zone_data.items():
toCSV.extend(value)
keys = toCSV[0].keys()
with open(filename, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(toCSV)
if __name__ == '__main__':
# Pushes everything from the logger to the command line output as well.
my_api = CamaApiConn()
csvfile = my_api.get_csv()
| 39.247748 | 177 | 0.572019 | 8,011 | 0.919431 | 0 | 0 | 0 | 0 | 0 | 0 | 3,752 | 0.430621 |
7dd9c7a745a3b97ae9face412cad220abf628e7d | 402 | py | Python | certbot_dns_cfproxy/__init__.py | ProfFan/certbot-dns-cfproxy | 999038999642e5ba070e7089d62146b4f05caa46 | [
"Apache-1.1"
]
| 2 | 2019-02-04T06:06:00.000Z | 2022-03-04T09:12:31.000Z | certbot_dns_cfproxy/__init__.py | ProfFan/certbot-dns-cfproxy | 999038999642e5ba070e7089d62146b4f05caa46 | [
"Apache-1.1"
]
| null | null | null | certbot_dns_cfproxy/__init__.py | ProfFan/certbot-dns-cfproxy | 999038999642e5ba070e7089d62146b4f05caa46 | [
"Apache-1.1"
]
| null | null | null | """
The `~certbot_dns_cfproxy.dns_cfproxy` plugin automates the process of
completing a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and
subsequently removing, TXT records using the CFProxy API.
Examples
--------
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``
certbot certonly \\
-a certbot-dns-cfproxy:dns-cfproxy \\
-d example.com
"""
| 23.647059 | 77 | 0.699005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.997512 |
7ddaf2399cff82d2687308f91fcf9cc720123562 | 1,234 | py | Python | web-app/servers/card-recognize/app.py | woojae9488/HLF_chaincode | 6737f70c1daea766fa567b08454b8b63a7a97d4a | [
"Apache-2.0"
]
| 2 | 2020-02-10T00:08:11.000Z | 2020-02-19T02:06:14.000Z | web-app/servers/card-recognize/app.py | woojae9488/HLF_chaincode | 6737f70c1daea766fa567b08454b8b63a7a97d4a | [
"Apache-2.0"
]
| 14 | 2020-04-26T13:56:38.000Z | 2020-09-23T06:27:53.000Z | web-app/servers/card-recognize/app.py | woojae9488/HLF_chaincode | 6737f70c1daea766fa567b08454b8b63a7a97d4a | [
"Apache-2.0"
]
| 3 | 2020-04-25T11:28:57.000Z | 2022-03-28T12:19:25.000Z | from flask import Flask, make_response, request
from flask_cors import CORS
import json
from config import *
from StudentCard import *
from ApiError import *
App = Flask(__name__)
cors = CORS(App,
resources={r'*': {'origins': ENV.ADDR_API_GATEWAY}},
headers='Content-Type: application/json')
@App.route('/student-card', methods=['POST'])
def studentcard():
print('Process student card routine')
response = {'message': '', 'data': {}}
status = 200
try:
reqData = request.get_json(silent=True)
imgData = reqData.get('imgData')
studentCard = StudentCard(imgData)
studentInfo = studentCard.extractStudentInfo()
response['message'] = 'Success'
response['data'] = studentInfo
except ApiError as e:
print(e)
status = e.status
response['message'] = e.error
except Exception as e:
print(e)
status = 500
response['message'] = 'Server Error'
finally:
responseJSON = json.dumps(response)
print('Finish student card routine')
print(responseJSON)
return make_response(responseJSON, status)
if __name__ == '__main__':
App.run(host='0.0.0.0', port=ENV.PORT)
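# Hypothetical request against this service: POST a JSON body whose `imgData`
# field holds the encoded card image; the reply is {"message": ..., "data": {...}}.
#
#   curl -X POST http://localhost:<port>/student-card \
#        -H 'Content-Type: application/json' \
#        -d '{"imgData": "<encoded card image>"}'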
| 26.826087 | 64 | 0.63047 | 0 | 0 | 0 | 0 | 841 | 0.681524 | 0 | 0 | 226 | 0.183144 |
7ddb8e0adf2de6b7f5b4a9514a61cad048355467 | 2,604 | py | Python | smart_home/power_controller.py | achuchev/-SmartHome-AlexaLambda | 0e8bfe30c76688a209ee4bc8d40016478d537aba | [
"MIT"
]
| null | null | null | smart_home/power_controller.py | achuchev/-SmartHome-AlexaLambda | 0e8bfe30c76688a209ee4bc8d40016478d537aba | [
"MIT"
]
| null | null | null | smart_home/power_controller.py | achuchev/-SmartHome-AlexaLambda | 0e8bfe30c76688a209ee4bc8d40016478d537aba | [
"MIT"
]
| 1 | 2020-06-27T15:20:29.000Z | 2020-06-27T15:20:29.000Z | import logging
from smart_home.mqtt_client import MQTTClient
from smart_home.utils_lambda import get_utc_timestamp, error_response, success_response, get_request_message_id, get_mqtt_topics_from_request, get_request_name, get_friendly_name_from_request
class PowerController(object):
@staticmethod
def handle_request(request):
power_on_desired_state = False
if get_request_name(request) == "TurnOn":
power_on_desired_state = True
logger = logging.getLogger()
logger.info("PowerController: Changing power state of '%s' to PowerOn %s ",
get_friendly_name_from_request(request), power_on_desired_state)
mqtt_topic_set, mqtt_topic_get = get_mqtt_topics_from_request(request)
message_id = get_request_message_id(request)
resp_payload = MQTTClient.publish_wait_for_resp(
mqtt_topic_set, {"messageId": message_id, "status": {"powerOn": power_on_desired_state}}, message_id, mqtt_topic_get)
if resp_payload is None:
return error_response(request)
current_power_on = False
status = resp_payload.get("status")
if status:
current_power_on = status.get("powerOn")
if current_power_on == power_on_desired_state:
return PowerController.__response_success(request, current_power_on)
return error_response(request)
@staticmethod
def handle_report_state(request, status):
logger = logging.getLogger()
logger.info("PowerController: Reporting state of '%s'",
get_friendly_name_from_request(request))
if status:
current_power_on = status.get("powerOn")
logger.info("PowerController: '%s' has PowerOn %s ",
get_friendly_name_from_request(request), current_power_on)
return PowerController.__response_success_property(current_power_on)
@staticmethod
def __response_success_property(current_power_on):
return {
"namespace": "Alexa.PowerController",
"name": "powerState",
"value": "ON" if current_power_on else "OFF",
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 0
}
@staticmethod
def __response_success(request, current_power_on):
payload = {
"context": {
"properties": [
PowerController.__response_success_property(
current_power_on)
]
}
}
return success_response(request, payload)
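# Message shapes used above (values are illustrative): handle_request publishes
#   {"messageId": "<id>", "status": {"powerOn": true}}
# on the device's "set" topic and waits for a reply on the "get" topic of the form
#   {"messageId": "<id>", "status": {"powerOn": true}}
# The reply's status.powerOn must equal the requested state, otherwise an
# error response is returned to Alexa.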
| 37.2 | 191 | 0.661674 | 2,347 | 0.901306 | 0 | 0 | 2,294 | 0.880952 | 0 | 0 | 336 | 0.129032 |
7ddd6afc3df36a52da70783ec74e257d9596b945 | 4,082 | py | Python | components/mpas-seaice/testing_and_setup/testcases/advection/plot_testcase.py | Fa-Li/E3SM | a91995093ec6fc0dd6e50114f3c70b5fb64de0f0 | [
"zlib-acknowledgement",
"FTL",
"RSA-MD"
]
| 235 | 2018-04-23T16:30:06.000Z | 2022-03-21T17:53:12.000Z | components/mpas-seaice/testing_and_setup/testcases/advection/plot_testcase.py | Fa-Li/E3SM | a91995093ec6fc0dd6e50114f3c70b5fb64de0f0 | [
"zlib-acknowledgement",
"FTL",
"RSA-MD"
]
| 2,372 | 2018-04-20T18:12:34.000Z | 2022-03-31T23:43:17.000Z | components/mpas-seaice/testing_and_setup/testcases/advection/plot_testcase.py | Fa-Li/E3SM | a91995093ec6fc0dd6e50114f3c70b5fb64de0f0 | [
"zlib-acknowledgement",
"FTL",
"RSA-MD"
]
| 254 | 2018-04-20T20:43:32.000Z | 2022-03-30T20:13:38.000Z | from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.cm as cm
import numpy as np
#-------------------------------------------------------------
def plot_subfigure(axis, array, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, cmin, cmax, cmap):
xMin = 1.0e30
xMax = -1.0e30
yMin = 1.0e30
yMax = -1.0e30
cmap = plt.get_cmap(cmap)
patches = []
colors = []
for iCell in range(0,nCells):
if (yCell[iCell] > 0.0):
vertices = []
for iVertexOnCell in range(0,nEdgesOnCell[iCell]):
iVertex = verticesOnCell[iCell,iVertexOnCell]
vertices.append((xVertex[iVertex],zVertex[iVertex]))
colors.append(array[iCell])
patches.append(Polygon(vertices))
xMin = min(xMin,xVertex[iVertex])
xMax = max(xMax,xVertex[iVertex])
yMin = min(yMin,zVertex[iVertex])
yMax = max(yMax,zVertex[iVertex])
pc = PatchCollection(patches, cmap=cmap)
pc.set_array(np.array(colors))
pc.set_clim(cmin, cmax)
axis.add_collection(pc)
axis.set_xlim(xMin,xMax)
axis.set_ylim(yMin,yMax)
axis.set_aspect("equal")
axis.ticklabel_format(style='plain')
axis.tick_params(axis='x', \
which='both', \
bottom=False, \
top=False, \
labelbottom=False)
axis.tick_params(axis='y', \
which='both', \
left=False, \
right=False, \
labelleft=False)
#-------------------------------------------------------------
def plot_testcase():
nGrids = [2562,10242,40962,163842]
testTypes = ["cosine_bell","slotted_cylinder"]
methods = ["IR","IR","upwind"]
iTimes = [0,-1,-1]
for nGrid in nGrids:
print("nGrid: ", nGrid)
fig, axes = plt.subplots(3,4)
iTestType = -1
for testType in testTypes:
iTestType += 1
print(" Test type: ", testType)
iMethod = -1
for method, iTime in zip(methods,iTimes):
iMethod += 1
print(" Method: ", method, ", iTime: ", iTime)
filenamein = "./output_%s_%s_%i/output.2000.nc" %(method,testType,nGrid)
filein = Dataset(filenamein,"r")
nCells = len(filein.dimensions["nCells"])
nEdgesOnCell = filein.variables["nEdgesOnCell"][:]
verticesOnCell = filein.variables["verticesOnCell"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
zCell = filein.variables["zCell"][:]
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
zVertex = filein.variables["zVertex"][:]
verticesOnCell[:] = verticesOnCell[:] - 1
iceAreaCategory = filein.variables["iceAreaCategory"][:]
filein.close()
iceAreaCell = np.sum(iceAreaCategory,axis=(2,3))
plot_subfigure(axes[iMethod,iTestType*2], iceAreaCell[iTime], nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, 0.0, 1.0, "viridis")
iceAreaCellDiff = iceAreaCell[iTime] - iceAreaCell[0]
if (iMethod != 0):
plot_subfigure(axes[iMethod,iTestType*2+1], iceAreaCellDiff, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, -1.0, 1.0, "bwr")
else:
axes[iMethod,iTestType*2+1].axis('off')
plt.savefig("advection_%6.6i.png" %(nGrid),dpi=300)
plt.cla()
plt.close(fig)
#-------------------------------------------------------------------------------
if __name__ == "__main__":
plot_testcase()
| 31.160305 | 184 | 0.533317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 522 | 0.127878 |
7dddf53059511b42c5f3c624d5d6af6d11789231 | 87 | py | Python | ABC_A/ABC063_A.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
]
| null | null | null | ABC_A/ABC063_A.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
]
| null | null | null | ABC_A/ABC063_A.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
]
| null | null | null | a, b = map(int, input().split())
if a+b >= 10:
print("error")
else:
print(a+b)
| 14.5 | 32 | 0.517241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.08046 |
7ddf431c5c2dcc581f44d2c5411d8380ca8401f0 | 2,278 | py | Python | aeropy/filehandling/paraview.py | belac626/AeroPy | 4f045306427e08b742237b7393ce9602f1072d60 | [
"MIT"
]
| null | null | null | aeropy/filehandling/paraview.py | belac626/AeroPy | 4f045306427e08b742237b7393ce9602f1072d60 | [
"MIT"
]
| null | null | null | aeropy/filehandling/paraview.py | belac626/AeroPy | 4f045306427e08b742237b7393ce9602f1072d60 | [
"MIT"
]
| null | null | null | #### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
network_number = 2
filename = 'test_network'
directory = 'C:\\Users\\leal26\\Documents\\GitHub\\AeroPy\\aeropy\\CST\\'
# get active view
renderView = GetActiveViewOrCreate('RenderView')
assembly = []
for i in range(1,network_number+1):
# create a new 'XML Structured Grid Reader'
test_network_vts = XMLStructuredGridReader(FileName=[directory + filename + str(i)+'.vts'])
# show data in view
test_network_vtsDisplay = Show(test_network_vts, renderView)
# trace defaults for the display properties.
test_network_vtsDisplay.Representation = 'Surface With Edges'
test_network_vtsDisplay.ColorArrayName = [None, '']
test_network_vtsDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
test_network_vtsDisplay.SelectOrientationVectors = 'None'
test_network_vtsDisplay.ScaleFactor = 0.1
test_network_vtsDisplay.SelectScaleArray = 'None'
test_network_vtsDisplay.GlyphType = 'Arrow'
test_network_vtsDisplay.GlyphTableIndexArray = 'None'
test_network_vtsDisplay.DataAxesGrid = 'GridAxesRepresentation'
test_network_vtsDisplay.PolarAxes = 'PolarAxesRepresentation'
test_network_vtsDisplay.ScalarOpacityUnitDistance = 0.3272506722223079
# init the 'PiecewiseFunction' selected for 'OSPRayScaleFunction'
test_network_vtsDisplay.OSPRayScaleFunction.Points = [2.326428429822192, 0.0, 0.5, 0.0, 37.626781425423815, 1.0, 0.5, 0.0]
# reset view to fit data
renderView.ResetCamera()
# update the view to ensure updated data information
renderView.Update()
#### saving camera placements for all active views
# current camera placement for renderView1
renderView.CameraPosition = [0.12476075744808501, 3.1845058646858693, 0.3710215545807592]
renderView.CameraFocalPoint = [0.5, 0.5, 0.0037752263491506906]
renderView.CameraViewUp = [-0.30729811760225784, -0.17101732138568032, 0.9361201539888863]
renderView.CameraParallelScale = 0.7079657120931511
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...). | 43.807692 | 127 | 0.763389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.356892 |
7de07a2c955d17b395e18d20843ee393cc3f7511 | 21,804 | py | Python | pyFIRS/utils.py | Ecotrust/pyFIRS | f4bd8e11b24f125c59b69b04a7c3d11eabc0e81b | [
"BSD-3-Clause"
]
| 3 | 2019-05-01T01:38:05.000Z | 2020-02-06T01:42:00.000Z | pyFIRS/utils.py | Ecotrust/pyFIRS | f4bd8e11b24f125c59b69b04a7c3d11eabc0e81b | [
"BSD-3-Clause"
]
| 1 | 2019-09-19T00:56:58.000Z | 2019-09-19T00:56:58.000Z | pyFIRS/utils.py | Ecotrust/pyFIRS | f4bd8e11b24f125c59b69b04a7c3d11eabc0e81b | [
"BSD-3-Clause"
]
| null | null | null | import glob
import json
import os
import subprocess
import time
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
import geopandas as gpd
import rasterio
import numpy as np
from shapely.geometry import Polygon
class PipelineError(RuntimeError):
def __init__(self, message):
self.message = message
def listlike(arg):
'''Checks whether an argument is list-like, returns boolean'''
return not hasattr(arg, "strip") and (hasattr(arg, "__getitem__")
or hasattr(arg, "__iter__"))
def clean_dir(dir_to_clean, file_extensions):
'''Deletes files with specified extension(s) from a directory.
This function is intended to help cleanup outputs from command line
tools that we do not want to keep. Files to be deleted will be
identified using a wildcard with that file extension in dir_to_clean.
Parameters
----------
dir_to_clean: string, path
path to directory to delete files from
file_extension: string or list-like of strings
file extensions that will be used for identifying files to remove,
such as ['.tfw', '.kml'].
'''
if listlike(file_extensions):
for ext in file_extensions:
to_rem = glob.glob(os.path.join(dir_to_clean, '*{}'.format(ext)))
for file in to_rem:
os.remove(file)
print("Removed {:,d} files with extension {}.".format(
len(to_rem), ext))
    elif isinstance(file_extensions, str):
        to_rem = glob.glob(os.path.join(dir_to_clean,
                                        '*{}'.format(file_extensions)))
        for file in to_rem:
            os.remove(file)
        print("Removed {:,d} files with extension {}.".format(
            len(to_rem), file_extensions))
    else:
        raise TypeError(
            'file_extensions needs to be a string or list-like of strings.')
def clean_buffer_polys(poly_shp,
tile_shp,
odir,
simp_tol=None,
simp_topol=None):
"""Removes polygons within the buffer zone of a tile.
This function removes polygons from a shapefile that fall in the buffered
area of point cloud tile. When building footprints or tree crowns (for
example) are delineated from a point cloud, a buffer around the tile is
generally be used to avoid edge effects. This tool computes the centroid of
each polygon and determines whether it falls within the bounds of the
unbuffered tile. It outputs a new shapefile containing only those polygons
whose centroids fall within the unbuffered tile.
The polygons may be simplified using optional arguments simp_tol and
simp_topol to reduce the number of points that define their boundaries.
Parameters
----------
polygons_shp: string, path to shapefile (required)
A shapefile containing the polygons delineated within a buffered tile.
tile_shp: string, path to shapefile (required)
A shapefile containing the bounds of the tile WITHOUT buffers
odir: string, path to directory (required)
Path to the output directory for the new shapefile
simp_tol = numeric,
Tolerance level for simplification. All points within a simplified
geometry will be no more than simp_tol from the original.
simp_topol = boolean (optional)
Whether or not to preserve topology of polygons. If False, a quicker
algorithm will be used, but may produce self-intersecting or otherwise
invalid geometries.
"""
fname = os.path.basename(poly_shp)
outfile = os.path.join(odir, fname)
os.makedirs(odir, exist_ok=True)
tile_boundary = gpd.read_file(tile_shp)
polys = gpd.read_file(poly_shp)
# boolean indicator of whether each polygon falls within tile boundary
clean_polys_ix = polys.centroid.within(tile_boundary.loc[0].geometry)
# retrieve the polygons within the boundary
clean_polys = polys[clean_polys_ix]
if simp_tol:
clean_polys = clean_polys.simplify(simp_tol, simp_topol)
if len(clean_polys) > 0:
clean_polys.to_file(outfile)
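# Hypothetical usage sketch for clean_buffer_polys, assuming 'crowns_tile42.shp'
# holds crowns delineated on a buffered tile and 'tile42_bounds.shp' holds the
# unbuffered tile boundary:
#
#   clean_buffer_polys('crowns_tile42.shp', 'tile42_bounds.shp',
#                      odir='cleaned_crowns', simp_tol=0.5, simp_topol=True)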
def clip_tile_from_shp(in_raster, in_shp, odir, buffer=0):
'''Clips a raster image to the bounding box of a shapefile.
The input raster will be clipped using a rasterio command line tool. The
output raster will have the same name and file type as the input raster, and
will be written to the output directory, odir. The process is executed using
subprocess.run().
Parameters
----------
in_raster: string, path to file
raster image to be clipped
in_shp: string, path to file
shapefile from which bounding box is calculated to clip the raster
odir: string, path
output directory where clipped raster will be stored
buffer: numeric
additional buffer to add to total bounding box of shapefile when
clipping the raster
Returns
-------
proc_clip: CompletedProcess
The result of executing subprocess.run using the rio clip command.
'''
basename = os.path.basename(in_raster)
# read the shapefile using geopandas and calculate its bounds
gdf = gpd.read_file(in_shp)
tile_bnds = ' '.join(str(x) for x in gdf.buffer(buffer).total_bounds)
# create the output directory if it doesn't already exist
os.makedirs(odir, exist_ok=True)
outfile = os.path.join(odir, basename)
# clip the raster
proc_clip = subprocess.run(
['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return proc_clip
def convert_project(infile, outfile, crs):
'''Converts a raster to another format and specifies its projection.
Uses rasterio command line tool executed using subprocess. The file
generated will have the same name and be in the same folder as the input
file.
Parameters
----------
infile: string, path to file
input raster to be converted
outfile: string, path to file
output raster to be generated
crs: string
specification of coordinate reference system to use following rasterio
command line tool (RIO) formatting (e.g., 'EPSG:3857')
Returns
-------
proc_convert: CompletedProcess
result of executing subprocess.run using rio convert
proc_project: CompletedProcess
result of executing subprocess.run using rio edit-info
'''
# convert the file to the new format
proc_convert = subprocess.run(['rio', 'convert', infile, outfile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
# add the projection info
proc_project = subprocess.run(['rio', 'edit-info', '--crs', crs, outfile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return proc_convert, proc_project
def validation_summary(xml_dir, verbose=False):
'''
Generates a summary of validation results for a directory of lidar files
Parameters
----------
xml_dir : string, path to directory
directory containing xml files produced by LASvalidate
verbose : boolean
whether or not to include the messages describing why any files
produced warning or failed validation.
Returns
-------
summary_report : a printed report
'''
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
passed = 0
warnings = 0
failed = 0
parse_errors = 0
warning_messages = []
failed_messages = []
for validation_report in xmls:
try:
tile_id = os.path.basename(validation_report).split('.')[0]
tree = ET.parse(validation_report)
root = tree.getroot()
result = root.find('report').find('summary').text.strip()
if result == 'pass':
passed += 1
else:
variable = root.find('report').find('details').find(
result).find('variable').text
note = root.find('report').find('details').find(result).find(
'note').text
if result == 'fail':
failed += 1
failed_messages.append('{} -> {} | {} : {}'.format(
tile_id, result, variable, note))
elif result == 'warning':
warnings += 1
warning_messages.append('{} -> {} | {} : {}'.format(
tile_id, result, variable, note))
except ParseError:
parse_errors += 1
summary = '''LASvalidate Summary
====================
Passed: {:,d}
Failed: {:,d}
Warnings: {:,d}
ParseErrors: {:,d}
'''.format(passed, failed, warnings, parse_errors)
details = '''Details
========
{}
{}
'''.format('\n'.join(failed_messages), '\n'.join(warning_messages))
print(summary)
if verbose:
print(details)
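# The parser in validation_summary assumes LASvalidate reports shaped roughly
# like the sketch below (the root element name is never checked, so treat it
# as an assumption):
#
#   <...>
#     <report>
#       <summary>pass | warning | fail</summary>
#       <details>
#         <warning>            <!-- or <fail> -->
#           <variable>...</variable>
#           <note>...</note>
#         </warning>
#       </details>
#     </report>
#   </...>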
def move_invalid_tiles(xml_dir, dest_dir):
'''Moves lidar data that fail validation checks into a new directory
Parameters
----------
xml_dir : string, path to directory
where the xml reports produced by LASvalidate can be found
dest_dir : str, path to directory
where you would like the point cloud and associated files to be moved
Returns
-------
A printed statement about how many tiles were moved.
'''
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
invalid_dir = dest_dir
num_invalid = 0
for validation_report in xmls:
tile_id = os.path.basename(validation_report).split('.')[0]
tree = ET.parse(validation_report)
root = tree.getroot()
result = root.find('report').find('summary').text.strip()
if result == 'fail':
# move the lidar file to a different folder
os.makedirs(invalid_dir, exist_ok=True)
for invalid_file in glob.glob(
os.path.join(xml_dir, tile_id + '*')):
basename = os.path.basename(invalid_file)
os.rename(invalid_file, os.path.join(invalid_dir, basename))
num_invalid += 1
print('Moved files for {} invalid tiles to {}'.format(
num_invalid, invalid_dir))
def get_bbox_as_poly(infile, epsg=None):
"""Uses PDAL's info tool to extract the bounding box of a file as a
shapely Polygon. If an EPSG code is provided, a GeoDataFrame is returned.
Parameters
----------
infile : str, path to file
path to input file that PDAL can read
epsg : int
EPSG code defining the coordinate reference system. Optional.
Returns
-------
bbox_poly : Polygon or GeoDataFrame
By default (no EPSG is provided), a shapely Polygon with the bounding
box as its coordinates is returned. If an EPSG code is specified,
bbox_poly is returned as a GeoPandas GeoDataFrame.
"""
result = subprocess.run(['pdal', 'info', infile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
json_result = json.loads(result.stdout.decode())
coords = json_result['stats']['bbox']['native']['boundary']['coordinates']
geometry = Polygon(*coords)
if epsg:
bbox_poly = gpd.GeoDataFrame(
geometry=[geometry], crs={'init': 'epsg:{}'.format(epsg)})
else:
bbox_poly = Polygon(*coords)
return bbox_poly
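# Hypothetical usage sketch, assuming 'tile.laz' is readable by PDAL and is
# stored in EPSG:26910 coordinates:
#
#   poly = get_bbox_as_poly('tile.laz')             # shapely Polygon
#   gdf = get_bbox_as_poly('tile.laz', epsg=26910)  # GeoDataFrame with CRS set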
def fname(path):
"""returns the filename as basename split from extension.
Parameters
-----------
path : str, path to file
filepath from which filename will be sliced
Returns
--------
filename : str
name of file, split from extension
"""
filename = os.path.basename(path).split('.')[0]
return filename
def annulus(inner_radius, outer_radius, dtype=np.uint8):
"""Generates a flat, donut-shaped (annular) structuring element.
A pixel is within the neighborhood if the euclidean distance between
it and the origin falls between the inner and outer radii (inclusive).
Parameters
----------
inner_radius : int
The inner radius of the annular structuring element
outer_radius : int
The outer radius of the annular structuring element
dtype : data-type
The data type of the structuring element
Returns
-------
selem : ndarray
The structuring element where elements of the neighborhood are 1
and 0 otherwise
"""
L = np.arange(-outer_radius, outer_radius + 1)
X, Y = np.meshgrid(L, L)
selem = np.array(
((X**2 + Y**2) <= outer_radius**2) * (
(X**2 + Y**2) >= inner_radius**2),
dtype=dtype)
return selem
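# Worked example: annulus(1, 3) returns a 7x7 uint8 array in which the centre
# pixel is 0 (distance 0 is inside the inner radius), the corners are 0
# (distance sqrt(18) > 3), and pixels such as (0, +-3) or (+-1, 0) are 1
# because their distance falls within [1, 3].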
def inspect_failures(failed_dir):
"""Prints error messages reported for tiles that failed in the lidar
processing pipeline.
Parameters
----------
failed_dir : string, path to directory
path to directory containing text files indicating any tiles which
failed processing
"""
failed = glob.glob(os.path.join(failed_dir, '*.txt'))
for filename in failed:
with open(filename) as f:
print([line for line in f.readlines() if line.rstrip() != ''])
print('----------------------')
def processing_summary(all_tiles, already_finished, processing_tiles,
finished_dir, failed_dir, start_time):
"""Prints a summary indicating progress of a lidar processing pipeline.
Parameters
----------
all_tiles : list-like
all tiles within a lidar acquisition
already_finished : list-like
tiles which were successfully processed in a previous execution of the
processing pipeline
processing_tiles : list-like
tiles which are being processed during the currently executing pipeline
finished_dir : string, path to directory
path to directory containing text files indicating any tiles which have
finished processing
failed_dir : string, path to directory
path to directory containing text files indicating any tiles which
failed processing
start_time : float
time the pipeline execution began, produced by time.time()
"""
failed = glob.glob(os.path.join(failed_dir, '*.txt'))
finished = glob.glob(os.path.join(finished_dir, '*.txt'))
summary = '''
Processing Summary
-------------------
{:>5,d} tiles in acquisition
{:>5,d} tiles previously finished in acquisition
{:>5,d} tiles being processed in this run
{:>5,d} tiles from this run finished
{:>5,d} tiles failed
'''.format(
len(all_tiles), len(already_finished), len(processing_tiles),
len(finished) - (len(all_tiles) - len(processing_tiles)), len(failed))
total_percent_unfinished = int(70 * (1 - len(finished) / len(all_tiles)))
total_percent_finished = int(70 * len(finished) / len(all_tiles))
total_percent_failed = int(70 * len(failed) / len(all_tiles))
this_run_unfinished = int(70 - 70*(len(finished) - (len(all_tiles) - \
len(processing_tiles))) / len(processing_tiles))
this_run_finished = int(70*(len(finished) - (len(all_tiles) - \
len(processing_tiles))) / len(processing_tiles))
progress_bars = '|' + '=' * this_run_finished + ' '* this_run_unfinished +\
'!' * total_percent_failed + '| {:.1%} this run\n'.format((len(finished)\
- (len(all_tiles) - len(processing_tiles))) / len(processing_tiles)) + \
'|' + '=' * total_percent_finished + ' ' * total_percent_unfinished + '!' \
* total_percent_failed + '| {:.1%} total'.format(len(finished) / \
len(all_tiles))
print(summary)
print(progress_bars)
time_to_complete(start_time, len(processing_tiles),
len(finished) - (len(all_tiles) - len(processing_tiles)))
def print_dhms(s):
"""Prints number of days, hours, minutes, and seconds
represented by number of seconds provided as input.
Parameters
----------
s : numeric
seconds
"""
days = s // (24 * 3600)
s = s % (24 * 3600)
hours = s // 3600
s %= 3600
minutes = s // 60
s %= 60
seconds = s
if days > 0:
print(f'{days:2.0f}d {hours:2.0f}h {minutes:2.0f}m {seconds:2.0f}s')
elif hours > 0:
print(f' {hours:2.0f}h {minutes:2.0f}m {seconds:2.0f}s')
else:
print(f' {minutes:2.0f}m {seconds:2.0f}s')
def time_to_complete(start_time, num_jobs, jobs_completed):
"""Prints elapsed time and estimated time of completion.
Parameters
----------
start_time : float
time the pipeline execution began, produced by time.time()
num_jobs : int
total number of jobs to be completed
jobs_completed : int
number of jobs completed so far
"""
if jobs_completed == 0:
print('\nNo jobs completed yet.')
else:
time_now = time.time()
elapsed = time_now - start_time
prop_complete = jobs_completed / num_jobs
est_completion = elapsed / prop_complete
time_left = est_completion - elapsed
print('\nelapsed: ', end='\t')
print_dhms(elapsed)
print('remaining: ', end='\t')
print_dhms(time_left)
def make_buffered_fishnet(xmin, ymin, xmax, ymax, crs, spacing=1000,
buffer=50):
"""Makes a GeoDataFrame with a fishnet grid that has overlapping edges.
Converts an existing lidar tiling scheme into one that has overlapping
tiles and which is aligned with a grid based on the spacing parameter.
Parameters
----------
xmin, ymin, xmax, ymax : numeric
Values indicating the extent of the existing lidar data.
crs : Coordinate Reference System
Must be readable by GeoPandas to create a GeoDataFrame.
spacing : int
Length and width of tiles in new tiling scheme prior to buffering
buffer : int
Amount of overlap between neighboring tiles.
"""
xmin, ymin = (
np.floor(np.array([xmin, ymin]) // spacing) * spacing).astype(int)
xmax, ymax = (
np.ceil(np.array([xmax, ymax]) // spacing) * spacing).astype(int) + spacing
xx, yy = np.meshgrid(
np.arange(xmin, xmax + spacing, spacing),
np.arange(ymin, ymax + spacing, spacing))
xx_leftbuff = xx[:, :-1] - buffer
xx_rightbuff = xx[:, 1:] + buffer
yy_downbuff = yy[:-1, :] - buffer
yy_upbuff = yy[1:, :] + buffer
ll = np.stack((
xx_leftbuff[1:, :].ravel(), # skip top row
yy_downbuff[:, :-1].ravel())).T # skip right-most column
ul = np.stack((
xx_leftbuff[:-1, :].ravel(), # skip bottom row
yy_upbuff[:, :-1].ravel())).T # skip right-most column
ur = np.stack((
xx_rightbuff[:-1, :].ravel(), # skip bottom row
yy_upbuff[:, 1:].ravel())).T # skip left-most column
lr = np.stack((
xx_rightbuff[1:, :].ravel(), # skip top row
yy_downbuff[:, 1:].ravel())).T # skip left-most column
buff_fishnet = np.stack([ll, ul, ur, lr])
polys = [
Polygon(buff_fishnet[:, i, :]) for i in range(buff_fishnet.shape[1])
]
ll_names = [x for x in (ll + buffer).astype(int).astype(str)]
tile_ids = [
'_'.join(tile) + '_{}'.format(str(spacing)) for tile in ll_names
]
buff_fishnet_gdf = gpd.GeoDataFrame(geometry=polys, crs=crs)
buff_fishnet_gdf['tile_id'] = tile_ids
return buff_fishnet_gdf.set_index('tile_id')
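# Hypothetical usage sketch: for an acquisition spanning roughly
# (552345, 5070123) to (556789, 5074456) in EPSG:26910,
#
#   fishnet = make_buffered_fishnet(552345, 5070123, 556789, 5074456,
#                                   crs='EPSG:26910', spacing=1000, buffer=50)
#
# snaps the extent outward to the 1000 m grid and returns tiles about 1100 m on
# a side (1000 m core plus the 50 m buffer on each edge), indexed by ids such
# as '552000_5070000_1000'.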
def get_intersecting_tiles(src_tiles, new_tiles):
"""Identifies tiles from src that intersect tiles in new_tiles.
This function is intended to identify the files which should be read for
retiling a lidar acquisition into the new_tiles layout.
src_tiles is expected to have a 'file_name' field.
Parameters
----------
src_tiles : GeoDataFrame
Original tiling scheme for lidar acquisition
new_tiles : GeoDataFrame
New tiling scheme for lidar acquisition, such as one created by the
make_buffered_fishnet function
Returns
-------
joined_tiles : GeoDataFrame
Each row shows a tile from new_tiles that intersected with one or more
tiles from src_tiles. The list of tiles from src_tiles that intersect
each tile in new_tiles are formatted as a space-delimited string.
"""
joined = gpd.sjoin(new_tiles, src_tiles)
joined_tiles = joined.groupby(level=0)['file_name'].apply(list).apply(
' '.join).to_frame()
joined_tiles.index.name = 'tile_id'
joined_tiles = joined_tiles.rename({
'file_name': 'intersecting_files'
},
axis=1)
return joined_tiles
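# Hypothetical result: if new tile '552000_5070000_1000' intersects source
# tiles 'a.laz' and 'b.laz', the returned frame holds one row whose
# 'intersecting_files' value is the space-delimited string 'a.laz b.laz',
# indexed by tile_id.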
def parse_coords_from_tileid(tile_id):
"""Get the coordinates of the lower left corner of the tile, assuming the
tile has been named in the pattern {XMIN}_{YMIN}_{LENGTH}.
Parameters
----------
tile_id : string
assumed tile_id follows the naming convention of {LLX}_{LLY}_{LENGTH}
where:
LLX = x-coordinate of lower-left corner of tile (in projected units)
LLY = y-coordinate of lower-left corner of tile (in projected units)
LENGTH = length of the raster (in projected units), assumed to be a
square tile shape
Returns
-------
llx, lly, length : int
x- and y- coordinates of lower-left corner and length of raster
"""
tile_parts = tile_id.split('_')
if len(tile_parts) == 2:
llx, lly = [int(coord) for coord in tile_parts]
length = 1000 # assumed tile width if not explicit in tile_id
elif len(tile_parts) == 3:
llx, lly, length = [int(coord) for coord in tile_parts]
return llx, lly, length
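# Worked examples:
#   parse_coords_from_tileid('552000_5070000_1000') -> (552000, 5070000, 1000)
#   parse_coords_from_tileid('552000_5070000')      -> (552000, 5070000, 1000)
# (two-part ids fall back to the assumed 1000-unit tile width)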
| 34.175549 | 83 | 0.631948 | 98 | 0.004495 | 0 | 0 | 0 | 0 | 0 | 0 | 11,727 | 0.537837 |
7de18177bc8f9c705a1427b2d13f1d6f74890139 | 1,308 | py | Python | test/test_message.py | Smac01/Stego | 0bcf94642871e611b6731676591a571ff40ce4a0 | [
"MIT"
]
| null | null | null | test/test_message.py | Smac01/Stego | 0bcf94642871e611b6731676591a571ff40ce4a0 | [
"MIT"
]
| null | null | null | test/test_message.py | Smac01/Stego | 0bcf94642871e611b6731676591a571ff40ce4a0 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import unittest
import sys
sys.path.insert(0, '.')
from random import choice
from PIL import Image
from stego.encoder import embed
from stego.decoder import extract, _decompress, IncorrectPassword
from stego.base import make_array, as_string, extract_metadata
images = ['test/rgba.png', 'test/cmyk.tiff', 'test/greyscale.bmp']
image = choice(images)
message = b'Pixels -> smallest unit(small colored square) that constitutes an images.'
key = b'my_secret_key'
def test_embed(message, password):
imageobj = Image.open(image)
embed(imageobj, message, password)
def test_extract(password):
imageobj = Image.open(image)
img_data = make_array(imageobj.getdata())
exif = extract_metadata(img_data)
content = as_string(img_data[slice(24, exif.size)])
if password:
content = _decompress(content, key=password)
else:
content = _decompress(content)
return content
class SampleTestMessage(unittest.TestCase):
def test_message(self):
test_embed(message, None)
content = test_extract(None)
self.assertEqual(message, content)
def test_message_with_encryption(self):
test_embed(message,key)
content = test_extract(key)
self.assertEqual(message, content)
self.assertRaises(IncorrectPassword,test_extract, b'random')
if __name__ == '__main__':
unittest.main() | 25.647059 | 86 | 0.769113 | 363 | 0.277523 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.142966 |
7de18971fbc5070d8520ba99ee3a0260bc65c1f5 | 638 | py | Python | tests/test_simulation_utils.py | burgersmoke/epysurv | 606aaf1d50a8d39473dc245e78e5b2eb122c2ba3 | [
"MIT"
]
| 8 | 2019-07-29T09:44:43.000Z | 2021-06-29T20:54:57.000Z | tests/test_simulation_utils.py | burgersmoke/epysurv | 606aaf1d50a8d39473dc245e78e5b2eb122c2ba3 | [
"MIT"
]
| 31 | 2019-03-29T23:05:33.000Z | 2021-10-04T16:26:46.000Z | tests/test_simulation_utils.py | burgersmoke/epysurv | 606aaf1d50a8d39473dc245e78e5b2eb122c2ba3 | [
"MIT"
]
| 4 | 2019-09-25T12:54:22.000Z | 2021-08-11T06:40:45.000Z | import pandas as pd
from rpy2 import robjects
from epysurv.simulation.utils import add_date_time_index_to_frame, r_list_to_frame
def test_add_date_time_index_to_frame():
df = add_date_time_index_to_frame(pd.DataFrame({"a": [1, 2, 3]}))
freq = pd.infer_freq(df.index)
assert freq == "W-MON"
def test_r_list_to_frame():
example_r_list = robjects.r("simulated = list(n_cases = 1:10)")
as_frame = r_list_to_frame(example_r_list, ["n_cases"])
expected_frame = pd.DataFrame(
{"n_cases": list(range(1, 11)), "timestep": list(range(1, 11))}
)
pd.testing.assert_frame_equal(as_frame, expected_frame)
| 29 | 82 | 0.716301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.112853 |
7de40d6d686a2a728856437129c7cee7420fe46e | 95 | py | Python | assemble/tool/assemble_CodeBlockUnixMake.py | vbloodv/blood | 1c6f6c54e937e8d7064f72f32cbbcc8fdaa2677d | [
"MIT"
]
| 2 | 2015-08-27T20:02:04.000Z | 2015-08-31T12:33:13.000Z | assemble/tool/assemble_CodeBlockUnixMake.py | vbloodv/blood | 1c6f6c54e937e8d7064f72f32cbbcc8fdaa2677d | [
"MIT"
]
| null | null | null | assemble/tool/assemble_CodeBlockUnixMake.py | vbloodv/blood | 1c6f6c54e937e8d7064f72f32cbbcc8fdaa2677d | [
"MIT"
]
| null | null | null | import cmake
cmake.buildCmake(
'CodeBlockUnixMake',
'../../',
'../../assemble/'
)
| 11.875 | 24 | 0.536842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.463158 |
7de5842a34fa630d341aa9d9c697b199184bb9ae | 655 | py | Python | examples/python/bunny_pieline.py | Willyzw/vdbfusion | ca9107a3f44e43629b149ea80c9cd21d9f274baa | [
"MIT"
]
| 119 | 2022-02-08T15:25:25.000Z | 2022-03-29T12:16:35.000Z | examples/python/bunny_pieline.py | arenas7307979/vdbfusion | 7ed8d3142b4b6e164633516f0ed435e1065e5212 | [
"MIT"
]
| 10 | 2022-02-09T07:54:23.000Z | 2022-03-25T03:12:47.000Z | examples/python/bunny_pieline.py | arenas7307979/vdbfusion | 7ed8d3142b4b6e164633516f0ed435e1065e5212 | [
"MIT"
]
| 11 | 2022-02-08T15:33:44.000Z | 2022-02-22T02:47:27.000Z | #!/usr/bin/env python3
# @file cow_pipeline.py
# @author Ignacio Vizzo [[email protected]]
#
# Copyright (c) 2021 Ignacio Vizzo, all rights reserved
import argh
from datasets import BunnyGeneratedDataset as Dataset
from vdbfusion_pipeline import VDBFusionPipeline as Pipeline
def main(
data_source: str,
config: str = "config/bunny.yaml",
visualize: bool = False,
):
"""Help here!"""
dataset = Dataset(data_source, apply_pose=True)
pipeline = Pipeline(dataset, config, map_name="bunny")
pipeline.run()
pipeline.visualize() if visualize else None
if __name__ == "__main__":
argh.dispatch_command(main)
| 25.192308 | 60 | 0.708397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.319084 |
7de74902240dafd5d3ece0f149442d4593ed9d43 | 1,091 | py | Python | tests/test_dashboard_generator_generate_widget.py | phelewski/aws-codepipeline-dashboard | c32fbfb01b383be9b5f813fac4ed36074e3ddc7e | [
"MIT"
]
| null | null | null | tests/test_dashboard_generator_generate_widget.py | phelewski/aws-codepipeline-dashboard | c32fbfb01b383be9b5f813fac4ed36074e3ddc7e | [
"MIT"
]
| 5 | 2021-04-02T18:12:58.000Z | 2021-05-21T12:15:30.000Z | tests/test_dashboard_generator_generate_widget.py | phelewski/aws-codepipeline-dashboard | c32fbfb01b383be9b5f813fac4ed36074e3ddc7e | [
"MIT"
]
| null | null | null | import os
import pytest
from dashboard_generator import DashboardGenerator
def test_generate_widget_ensure_return_value_is_dict(env_variables):
response = DashboardGenerator()._generate_widget(y=1, period=60, pipeline='foo')
assert type(response) == dict
def test_generate_widget_ensure_values_are_used_properly_in_widget(env_variables):
y = 1
period = 60
pipeline = 'foo'
dimension = 'PipelineName'
response = DashboardGenerator()._generate_widget(y, period, pipeline)
for metric in response['properties']['metrics']:
if 'SuccessCount' in metric:
assert metric == [
'Pipeline',
'SuccessCount',
dimension,
pipeline,
{
'color': '#000000',
'label': 'Success Count',
'stat': 'Sum'
}
]
assert response['properties']['region'] == os.environ['AWS_REGION']
assert response['properties']['title'] == pipeline
assert response['properties']['period'] == period
| 29.486486 | 84 | 0.6022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.186068 |
7de837001eba6d36074503fa3a70a1bcb083d08b | 795 | py | Python | opencadd/tests/structure/test_superposition_mda.py | pipaj97/opencadd | 4fcf090bd612a22df9d617473ae458316a4cb4b6 | [
"MIT"
]
| 39 | 2020-08-14T07:33:21.000Z | 2022-03-30T02:05:19.000Z | opencadd/tests/structure/test_superposition_mda.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
]
| 94 | 2020-06-29T12:47:46.000Z | 2022-02-13T19:16:25.000Z | opencadd/tests/structure/test_superposition_mda.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
]
| 11 | 2020-11-11T17:12:38.000Z | 2022-03-21T09:23:39.000Z | """
Tests for opencadd.structure.superposition.engines.mda
"""
import pytest
from opencadd.structure.core import Structure
from opencadd.structure.superposition.engines.mda import MDAnalysisAligner
def test_mda_instantiation():
aligner = MDAnalysisAligner()
def test_mda_calculation():
aligner = MDAnalysisAligner()
structures = [Structure.from_pdbid(pdb_id) for pdb_id in ["4u3y", "4u40"]]
result = aligner.calculate(structures)
# Check API compliance
assert "superposed" in result
assert "scores" in result
assert "rmsd" in result["scores"]
assert "metadata" in result
# Check RMSD values
# TODO: pytest.approx is not working reliably - check with Dennis too, he has the same problem
assert pytest.approx(result["scores"]["rmsd"], 1.989)
| 28.392857 | 98 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.335849 |
7de96af0dde6dfcdb1cd866b9ae2a4a947f6d2c2 | 274 | py | Python | examples/python/hello2.py | redcodestudios/legion_script | 565fb9bc6fe1dd9d9cf1a2111fd4e4cef4cd3565 | [
"MIT"
]
| 13 | 2020-10-21T16:34:50.000Z | 2022-03-16T14:37:30.000Z | examples/python/hello2.py | redcodestudios/legion_script | 565fb9bc6fe1dd9d9cf1a2111fd4e4cef4cd3565 | [
"MIT"
]
| null | null | null | examples/python/hello2.py | redcodestudios/legion_script | 565fb9bc6fe1dd9d9cf1a2111fd4e4cef4cd3565 | [
"MIT"
]
| null | null | null | import engine
print("Python: Script 2")
class Rotation(metaclass=engine.MetaComponent):
def __init__(self):
self.trans = 5
result = engine.query(Color)
print("Python: Query colors from Script 2")
for c in result:
c.string()
print("--------------------")
| 18.266667 | 47 | 0.638686 | 94 | 0.343066 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.277372 |
7de9ef0f997041de89ca689516f1669065c3dc15 | 4,389 | py | Python | io_scene_halo/file_tag/import_tag.py | AerialDave144/Halo-Asset-Blender-Development-Toolset | f1b0c0b22806ebabaf0126ad864896193c02307f | [
"MIT"
]
| null | null | null | io_scene_halo/file_tag/import_tag.py | AerialDave144/Halo-Asset-Blender-Development-Toolset | f1b0c0b22806ebabaf0126ad864896193c02307f | [
"MIT"
]
| null | null | null | io_scene_halo/file_tag/import_tag.py | AerialDave144/Halo-Asset-Blender-Development-Toolset | f1b0c0b22806ebabaf0126ad864896193c02307f | [
"MIT"
]
| null | null | null | # ##### BEGIN MIT LICENSE BLOCK #####
#
# MIT License
#
# Copyright (c) 2022 Steven Garcia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
import bpy
from ..global_functions import tag_format
from ..file_model import build_scene as build_scene_model
from ..file_physics import build_scene as build_scene_physics
from ..file_animation import build_scene as build_scene_animation
from ..file_collision import build_scene as build_scene_collision
from ..file_level import build_scene as build_scene_level
from ..file_camera_track import build_scene as build_camera_track
from ..file_model.process_file_mode_retail import process_file_mode_retail as process_mode
from ..file_model.process_file_mod2_retail import process_file_mod2_retail as process_mod2
from ..file_collision.process_file_retail import process_file_retail as process_collision_retail
from ..file_physics.process_file_retail import process_file_retail as process_physics_retail
from ..file_animation.process_file_retail import process_file_retail as process_animation_retail
from ..file_level.h1.process_file_retail import process_file_retail as process_level_retail
from ..file_level.h2.process_file import process_file_retail as process_h2_level
from ..file_camera_track.process_file_retail import process_file_retail as process_camera_track_retail
def load_file(context, file_path, fix_rotations, report):
input_stream = open(file_path, "rb")
if tag_format.check_file_size(input_stream) < 64: # Size of the header for all tags
input_stream.close()
report({'ERROR'}, "File size does not meet the minimum amount required. File is either not a tag or corrupted")
return {'CANCELLED'}
tag_group, group_is_valid = tag_format.check_group(input_stream)
if not group_is_valid:
input_stream.close()
report({'ERROR'}, "File does not have a valid tag class. Make sure you are importing a tag supported by the toolset")
return {'CANCELLED'}
if tag_group == "mode" or tag_group == "mod2":
build_scene = build_scene_model
if tag_group == "mode":
ASSET = process_mode(input_stream, report)
else:
ASSET = process_mod2(input_stream, report)
elif tag_group == "coll":
build_scene = build_scene_collision
ASSET = process_collision_retail(input_stream, report)
elif tag_group == "phys":
build_scene = build_scene_physics
ASSET = process_physics_retail(input_stream, report)
elif tag_group == "antr":
build_scene = build_scene_animation
ASSET = process_animation_retail(input_stream, report)
elif tag_group == "sbsp":
build_scene = build_scene_level
ASSET = process_level_retail(input_stream, tag_format, report)
elif tag_group == "psbs":
build_scene = build_scene_level
ASSET = process_h2_level(input_stream, tag_format, report)
elif tag_group == "trak":
build_scene = build_camera_track
ASSET = process_camera_track_retail(input_stream, report)
else:
input_stream.close()
report({'ERROR'}, "Not implemented")
return {'CANCELLED'}
input_stream.close()
build_scene.build_scene(context, ASSET, fix_rotations, report)
return {'FINISHED'}
if __name__ == '__main__':
bpy.ops.import_scene.model()
| 39.1875 | 125 | 0.75188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,533 | 0.349282 |
7deaa318807e1bc9f791206624ba77111ef68405 | 1,537 | py | Python | pines/smartread.py | jpn--/pine | 3980a9f0b09dd36b2fed7e52750847637be5f067 | [
"MIT"
]
| 2 | 2017-08-09T02:42:37.000Z | 2020-06-16T14:14:16.000Z | pines/smartread.py | jpn--/pine | 3980a9f0b09dd36b2fed7e52750847637be5f067 | [
"MIT"
]
| null | null | null | pines/smartread.py | jpn--/pine | 3980a9f0b09dd36b2fed7e52750847637be5f067 | [
"MIT"
]
| null | null | null |
import gzip, os, struct, zipfile, io
class SmartFileReader(object):
def __init__(self, file, *args, **kwargs):
if file[-3:]=='.gz':
with open(file, 'rb') as f:
f.seek(-4, 2)
self._filesize = struct.unpack('I', f.read(4))[0]
self.file = gzip.open(file, *args, **kwargs)
elif file[-4:]=='.zip':
zf = zipfile.ZipFile(file, 'r')
zf_info = zf.infolist()
if len(zf_info)!=1:
raise TypeError("zip archive files must contain a single member file for SmartFileReader")
zf_info = zf_info[0]
self.file = zf.open(zf_info.filename, 'r', *args, **kwargs)
self._filesize = zf_info.file_size
else:
self.file = open(file, 'rt', *args, **kwargs)
self._filesize = os.fstat(self.file.fileno()).st_size
def __getattr__(self, name):
return getattr(self.file, name)
def __setattr__(self, name, value):
if name in ['file', 'percentread', '_filesize']:
return object.__setattr__(self, name, value)
return setattr(self.file, name, value)
def __delattr__(self, name):
return delattr(self.file, name)
def percentread(self):
try:
return (float(self.file.tell())/float(self._filesize)*100)
except io.UnsupportedOperation:
			return (1.0 - float(self.file._left)/float(self._filesize))*100
def __iter__(self):
return self.file.__iter__()
def bytesread(self):
try:
b = float(self.file.tell())
except:
return "error in bytesread"
labels = ['B','KB','MB','GB','TB']
scale = 0
while scale < 4 and b > 1024:
b /= 1024
scale += 1
return "{:.2f}{}".format(b,labels[scale])
| 30.137255 | 94 | 0.657775 | 1,494 | 0.972023 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.117111 |
7dec49d76923ecb9be4b17a5cb4c2e8eb1b3a170 | 510 | py | Python | lib/clckwrkbdgr/time.py | umi0451/dotfiles | c618811be788d995fe01f6a16b355828d7efdd36 | [
"MIT"
]
| 2 | 2017-04-16T14:54:17.000Z | 2020-11-12T04:15:00.000Z | lib/clckwrkbdgr/time.py | clckwrkbdgr/dotfiles | 292dac8c3211248b490ddbae55fe2adfffcfcf58 | [
"MIT"
]
| null | null | null | lib/clckwrkbdgr/time.py | clckwrkbdgr/dotfiles | 292dac8c3211248b490ddbae55fe2adfffcfcf58 | [
"MIT"
]
| null | null | null | from __future__ import absolute_import
from time import *
import calendar
import datetime
import six
def get_utctimestamp(mtime=None): # pragma: no cover
""" Converts local mtime (timestamp) to integer UTC timestamp.
If mtime is None, returns current UTC time.
"""
if mtime is None:
if six.PY2:
return int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())
return int(datetime.datetime.utcnow().timestamp())
return int(calendar.timegm(datetime.datetime.fromtimestamp(mtime).timetuple()))
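if __name__ == '__main__':  # pragma: no cover
	# Illustration only (not part of the original module): both calls should
	# print roughly the same integer UTC timestamp, the second one converted
	# from a local epoch value.
	print(get_utctimestamp())
	print(get_utctimestamp(time()))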
| 31.875 | 88 | 0.754902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.254902 |
7dee18b24646c7a504a2809135076549b5a544fa | 135 | py | Python | configutator/__version.py | innovate-invent/configutator | 372b45c44a10171b8518e61f2a7974969304c33a | [
"MIT"
]
| null | null | null | configutator/__version.py | innovate-invent/configutator | 372b45c44a10171b8518e61f2a7974969304c33a | [
"MIT"
]
| 1 | 2017-09-22T05:52:54.000Z | 2017-09-22T05:52:54.000Z | configutator/__version.py | innovate-invent/configutator | 372b45c44a10171b8518e61f2a7974969304c33a | [
"MIT"
]
| null | null | null | __version__ = [1, 0, 2]
__versionstr__ = '.'.join([str(i) for i in __version__])
if __name__ == '__main__':
print(__versionstr__) | 22.5 | 56 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.096296 |
7dee5b01ddca7ca6f3f444bdaf770ca84c443c68 | 572 | py | Python | tests/integration/test_serialise.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
]
| null | null | null | tests/integration/test_serialise.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
]
| null | null | null | tests/integration/test_serialise.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
]
| null | null | null | from pathlib import Path
from typing import Dict
from eodatasets3 import serialise
from .common import assert_same, dump_roundtrip
def test_valid_document_works(tmp_path: Path, example_metadata: Dict):
generated_doc = dump_roundtrip(example_metadata)
# Do a serialisation roundtrip and check that it's still identical.
reserialised_doc = dump_roundtrip(
serialise.to_doc(serialise.from_doc(generated_doc))
)
assert_same(generated_doc, reserialised_doc)
assert serialise.from_doc(generated_doc) == serialise.from_doc(reserialised_doc)
| 30.105263 | 84 | 0.791958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.117133 |
7deee6c010d48a8d2b8631423560a24cab9c77a0 | 4,369 | py | Python | src/plot/plot-bb/plot_methods.py | bcrafton/speed_read | 3e9c0c873e49e4948a216aae14ec0d4654d1a62c | [
"MIT"
]
| null | null | null | src/plot/plot-bb/plot_methods.py | bcrafton/speed_read | 3e9c0c873e49e4948a216aae14ec0d4654d1a62c | [
"MIT"
]
| null | null | null | src/plot/plot-bb/plot_methods.py | bcrafton/speed_read | 3e9c0c873e49e4948a216aae14ec0d4654d1a62c | [
"MIT"
]
| 2 | 2020-11-08T12:51:23.000Z | 2021-12-02T23:16:48.000Z |
import numpy as np
import matplotlib.pyplot as plt
####################
def merge_dicts(list_of_dicts):
results = {}
for d in list_of_dicts:
for key in d.keys():
if key in results.keys():
results[key].append(d[key])
else:
results[key] = [d[key]]
return results
####################
comp_pJ = 22. * 1e-12 / 32. / 16.
num_layers = 6
num_comparator = 8
results = np.load('results.npy', allow_pickle=True).item()
y_mean = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_std = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_pJ = np.zeros(shape=(2, 2, 2, 2, num_layers))
cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
nmac = np.zeros(shape=(2, 2, 2, 2, num_layers))
array = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_ron = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_roff = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_adc = np.zeros(shape=(2, 2, 2, 2, num_layers, num_comparator))
y_energy = np.zeros(shape=(2, 2, 2, 2, num_layers))
array_util = np.zeros(shape=(2, 2, 2, 2, num_layers))
for key in sorted(results.keys()):
(skip, cards, alloc, profile) = key
alloc = 1 if alloc == 'block' else 0
layer_results = results[key]
max_cycle = 0
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_mean[skip][cards][alloc][profile][layer] = np.mean(rdict['mean'])
y_std[skip][cards][alloc][profile][layer] = np.mean(rdict['std'])
############################
y_ron[skip][cards][alloc][profile][layer] = np.sum(rdict['ron'])
y_roff[skip][cards][alloc][profile][layer] = np.sum(rdict['roff'])
y_adc[skip][cards][alloc][profile][layer] = np.sum(rdict['adc'], axis=0)
y_energy[skip][cards][alloc][profile][layer] += y_ron[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += y_roff[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += np.sum(y_adc[skip][cards][alloc][profile][layer] * np.array([1,2,3,4,5,6,7,8]) * comp_pJ)
y_mac_per_cycle[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / np.sum(rdict['cycle'])
y_mac_per_pJ[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / 1e12 / np.sum(y_energy[skip][cards][alloc][profile][layer])
############################
cycle[skip][cards][alloc][profile][layer] = np.mean(rdict['cycle'])
nmac[skip][cards][alloc][profile][layer] = np.mean(rdict['nmac'])
array[skip][cards][alloc][profile][layer] = np.mean(rdict['array'])
############################
max_cycle = max(max_cycle, np.mean(rdict['cycle']))
############################
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_cycle = np.mean(rdict['cycle'])
y_stall = np.mean(rdict['stall'])
y_array = np.mean(rdict['array'])
array_util[skip][cards][alloc][profile][layer] = (y_array * y_cycle - y_stall) / (y_array * max_cycle)
############################
####################
layers = np.array(range(1, 6+1))
skip_none = int(np.max(cycle[1, 0, 0, 0]))
skip_layer = int(np.max(cycle[1, 0, 0, 1]))
skip_block = int(np.max(cycle[1, 0, 1, 1]))
cards_none = int(np.max(cycle[1, 1, 0, 0]))
cards_layer = int(np.max(cycle[1, 1, 0, 1]))
cards_block = int(np.max(cycle[1, 1, 1, 1]))
height = [skip_none, skip_layer, skip_block, cards_none, cards_layer, cards_block]
x = ['skip/none', 'skip/layer', 'skip/block', 'cards/none', 'cards/layer', 'cards/block']
####################
plt.rcParams.update({'font.size': 12})
####################
plt.cla()
plt.clf()
plt.close()
plt.ylabel('# Cycles')
# plt.xlabel('Method')
plt.xticks(range(len(x)), x, rotation=45)
width = 0.2
plt.bar(x=x, height=height, width=width)
ax = plt.gca()
for i, h in enumerate(height):
# print (i, h)
ax.text(i - width, h + np.min(height)*0.02, str(h), fontdict={'size': 12})
fig = plt.gcf()
fig.set_size_inches(9, 5)
plt.tight_layout()
fig.savefig('cycles.png', dpi=300)
####################
| 29.721088 | 145 | 0.559396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 578 | 0.132296 |
7def97525f164a67b3206cba17eb1da34b5d6234 | 3,154 | py | Python | build/getversion.py | timgates42/subversion | 0f088f530747140c6783c2eeb77ceff8e8613c42 | [
"Apache-2.0"
]
| 3 | 2017-01-03T03:20:56.000Z | 2018-12-24T22:05:09.000Z | build/getversion.py | timgates42/subversion | 0f088f530747140c6783c2eeb77ceff8e8613c42 | [
"Apache-2.0"
]
| 3 | 2016-06-12T17:02:25.000Z | 2019-02-03T11:08:18.000Z | build/getversion.py | timgates42/subversion | 0f088f530747140c6783c2eeb77ceff8e8613c42 | [
"Apache-2.0"
]
| 3 | 2017-01-21T00:15:13.000Z | 2020-11-04T07:23:50.000Z | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# getversion.py - Parse version numbers from C header files.
#
import os
import re
import sys
import traceback
__all__ = ['Parser', 'Result']
class Result:
pass
class Parser:
def __init__(self):
self.patterns = {}
def search(self, define_name, value_name):
    'Add the name of a define to the list of search patterns.'
self.patterns[define_name] = value_name
def parse(self, file):
'Parse the file, extracting defines into a Result object.'
stream = open(file, 'rt')
result = Result()
regex = re.compile(r'^\s*#\s*define\s+(\w+)\s+(\d+)')
for line in stream.readlines():
match = regex.match(line)
if match:
try:
name = self.patterns[match.group(1)]
except:
continue
setattr(result, name, int(match.group(2)))
stream.close()
return result
def svn_extractor(parser, include_file):
'''Pull values from svn.version.h'''
  parser.search('SVN_VER_MAJOR', 'major')
  parser.search('SVN_VER_MINOR', 'minor')
  parser.search('SVN_VER_PATCH', 'patch')
  try:
    r = parser.parse(include_file)
except IOError:
typ, val, tb = sys.exc_info()
msg = ''.join(traceback.format_exception_only(typ, val))
usage_and_exit(msg)
sys.stdout.write("%d.%d.%d" % (r.major, r.minor, r.patch))
def sqlite_extractor(parser, include_file):
'''Pull values from sqlite3.h'''
  parser.search('SQLITE_VERSION_NUMBER', 'version')
  try:
    r = parser.parse(include_file)
except IOError:
typ, val, tb = sys.exc_info()
msg = ''.join(traceback.format_exception_only(typ, val))
usage_and_exit(msg)
  major = r.version // 1000000
  minor = (r.version - (major * 1000000)) // 1000
  micro = (r.version - (major * 1000000) - (minor * 1000))
sys.stdout.write("%d.%d.%d" % (major, minor, micro))
extractors = {
'SVN' : svn_extractor,
# 'SQLITE' : sqlite_extractor, # not used
}
def usage_and_exit(msg):
if msg:
sys.stderr.write("%s\n\n" % msg)
sys.stderr.write("usage: %s [SVN|SQLITE] [header_file]\n" % \
os.path.basename(sys.argv[0]))
sys.stderr.flush()
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) == 3:
extractor = extractors[sys.argv[1]]
include_file = sys.argv[2]
else:
usage_and_exit("Incorrect number of arguments")
# Extract and print the version number
p = Parser()
extractor(p, include_file)
| 27.426087 | 63 | 0.675333 | 697 | 0.220989 | 0 | 0 | 0 | 0 | 0 | 0 | 1,388 | 0.440076 |
7df019fabd0e51d78fd4be6cb16ea6706b50e9be | 1,136 | py | Python | redis/p02-vote/client/c02.py | JoseIbanez/testing | 4d6ff310cd63a8b2f8e1abcfbea0f17b23220021 | [
"MIT"
]
| 1 | 2016-09-15T03:58:30.000Z | 2016-09-15T03:58:30.000Z | redis/p02-vote/client/c02.py | JoseIbanez/testing | 4d6ff310cd63a8b2f8e1abcfbea0f17b23220021 | [
"MIT"
]
| 1 | 2020-09-13T08:44:50.000Z | 2020-09-13T08:44:50.000Z | redis/p02-vote/client/c02.py | JoseIbanez/testing | 4d6ff310cd63a8b2f8e1abcfbea0f17b23220021 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
import httplib
import random
import argparse
import sys
#Get options
parser = argparse.ArgumentParser(
description='Testing vote app')
parser.add_argument(
'-port',
type=int,
help='port of server',
default=8000)
parser.add_argument(
'-host',
type=str,
help='server name/ip',
default="localhost")
args = parser.parse_args()
#Color table
colorList = ["blue", "orange", "red", "green", "yellow" ]
colorSize = len(colorList) - 1
#Connect with server
conn = httplib.HTTPConnection(args.host, args.port)
#initial request
conn.request("GET", "/")
r1 = conn.getresponse()
#print(r1.status, r1.reason)
print(r1.read())
#vote loop
count = 0
while count < 100 :
count = count + 1
nColor = random.randint(0, colorSize)
conn.request("GET", "/v1/vote?color="+colorList[nColor])
r1 = conn.getresponse()
#print(r1.read())
print
# view current results
conn.request("GET", "/v1/listVotes")
r1 = conn.getresponse()
print(r1.read())
conn.request("GET", "/v1/listWorkers")
r1 = conn.getresponse()
print(r1.read())
conn.close() | 17.476923 | 60 | 0.647007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.294894 |
7df1653c3e5e087f0be1c1ff434e58035664ddf1 | 8,161 | py | Python | vm_manager/vm_functions/admin_functionality.py | NeCTAR-RC/bumblebee | 8ba4c543695c83ea1ca532012203f05189438e23 | [
"Apache-2.0"
]
| 3 | 2021-11-19T10:45:17.000Z | 2022-02-15T21:57:58.000Z | vm_manager/vm_functions/admin_functionality.py | NeCTAR-RC/bumblebee | 8ba4c543695c83ea1ca532012203f05189438e23 | [
"Apache-2.0"
]
| null | null | null | vm_manager/vm_functions/admin_functionality.py | NeCTAR-RC/bumblebee | 8ba4c543695c83ea1ca532012203f05189438e23 | [
"Apache-2.0"
]
| null | null | null | from uuid import UUID
import django_rq
import logging
from datetime import datetime, timezone, timedelta
from django.core.mail import mail_managers
from django.db.models import Count
from django.db.models.functions import TruncDay
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render
from operator import itemgetter
from researcher_workspace.utils import offset_month_and_year
from vm_manager.models import Instance, Resize, Volume
from vm_manager.utils.utils import get_nectar
from vm_manager.vm_functions.resize_vm import downsize_expired_supersized_vms
from vm_manager.utils.Check_ResearchDesktop_Availability import \
check_availability
logger = logging.getLogger(__name__)
def test_function(request):
if not request.user.is_superuser:
raise Http404()
return HttpResponse(_generate_weekly_availability_report(),
content_type='text/plain')
def admin_worker(request):
if not request.user.is_superuser:
raise Http404()
return HttpResponse("do something", content_type='text/plain')
def db_check(request):
if not request.user.is_superuser:
raise Http404()
n = get_nectar()
nova_servers = n.nova.servers.list()
cinder_volumes = n.cinder.volumes.list()
db_deleted_instances = Instance.objects.exclude(deleted=None) \
.values_list('id', flat=True)
deleted_instances = [
(server.id, server.name, server.metadata.get('environment', ''))
for server in nova_servers
if UUID(server.id) in db_deleted_instances]
db_deleted_volumes = Volume.objects.exclude(deleted=None) \
.values_list('id', flat=True)
deleted_volumes = [
(volume.id, volume.name, volume.metadata.get('environment',
volume.name[-1]))
for volume in cinder_volumes
if UUID(volume.id) in db_deleted_volumes]
db_instances = Instance.objects.filter(deleted=None) \
.values_list('id', flat=True)
missing_instances = [
(server.id, server.name, server.metadata.get('environment', ''))
for server in nova_servers if UUID(server.id) not in db_instances]
db_volumes = Volume.objects.filter(deleted=None) \
.values_list('id', flat=True)
missing_volumes = [
(volume.id, volume.name, volume.metadata.get('environment',
volume.name[-1]))
for volume in cinder_volumes if UUID(volume.id) not in db_volumes]
return render(request, 'vm_manager/db_check.html',
{'missing_instances': missing_instances,
'missing_volumes': missing_volumes,
'deleted_instances': deleted_instances,
'deleted_volumes': deleted_volumes, })
def start_downsizing_cron_job(requesting_feature):
scheduler = django_rq.get_scheduler('default')
# Uncomment this line temporarily to activate this function
"""scheduler.cron("59 13 * * *", downsize_expired_supersized_vms, requesting_feature)"""
# Set cron job to 13:59 UTC as the server runs on UTC
# and that translates to 23:59 AEST or 0:59 AEDT (during daylight savings)
return scheduler.get_jobs(with_times=True)
def _generate_weekly_availability_report():
try:
availability = check_availability()
mail_managers("Weekly Availability Report", availability)
except Exception as e:
logger.error(
f"The Check_ResearchDesktop_Availability script returned: {e}.")
def vm_report_for_csv(reporting_months, operating_systems):
now = datetime.now(timezone.utc)
# A dict of zero values for the last year and this month so far
date_list = [
(offset_month_and_year(month_offset, now.month, now.year), 0)
for month_offset in range(reporting_months, 0, -1)]
start_date = datetime(day=1, month=date_list[0][0][0],
year=date_list[0][0][1], tzinfo=timezone.utc)
empty_date_dict = dict(date_list)
results = []
# table of peak number of simultaneous vms of each OS
data_lists = [
[operating_system, empty_date_dict.copy()]
for operating_system in operating_systems]
for operating_system, instance_count in data_lists:
date_counts = _get_vm_info(operating_system)['vm_count']
for date_count in date_counts:
date_count["simple_date"] = (
date_count["date"].month, date_count["date"].year)
for date in instance_count:
date_counts_from_this_month = [
date_count["count"] for date_count in date_counts
if date_count["simple_date"] == date]
if date_counts_from_this_month:
instance_count[date] = max(date_counts_from_this_month)
results.append({"name": "Peak VMs per month", "values": data_lists})
# table of number of resizes per month
data_lists = [
[operating_system, empty_date_dict.copy()]
for operating_system in operating_systems]
for operating_system, resize_count in data_lists:
resizes = Resize.objects.filter(
instance__boot_volume__operating_system=operating_system,
requested__gte=start_date)
for resize in resizes:
resize.start = (resize.requested.month
+ 12 * resize.requested.year)
if resize.expired():
resize.end = resize.expired()
else:
resize.end = datetime.now(timezone.utc)
resize.end = resize.end.month + 12 * resize.end.year
for (month, year) in resize_count.keys():
resize_count_month = month + 12 * year
for resize in resizes:
if resize.start <= resize_count_month <= resize.end:
resize_count[(month, year)] += 1
results.append({"name": "Boosts", "values": data_lists})
return results
def vm_report_for_page(operating_system):
vm_count = Instance.objects.filter(deleted=None,
boot_volume__operating_system=
operating_system).count()
vm_info = _get_vm_info(operating_system)
return {'vm_count': {operating_system: vm_count},
'vm_info': {operating_system: vm_info}}
def _get_vm_info(operating_system):
vms = Instance.objects.filter(boot_volume__operating_system=
operating_system).order_by('created')
error_dates = vms.filter(error_flag__isnull=False) \
.order_by('error_flag') \
.annotate(date=TruncDay('error_flag')) \
.values('date') \
.annotate(errored_count=Count('id')) \
.order_by('date')
deleted = [
{'date': vm.deleted, 'count': -1}
for vm in vms.order_by('deleted') if vm.deleted]
created = [{'date': vm.created, 'count': 1} for vm in vms]
# `sorted` uses timsort, which means that for sorting two concatenated
# sorted lists, it actually just merges the two lists in O(n)
vm_count = sorted(created + deleted, key=itemgetter('date'))
count = 0
for date_obj in vm_count:
count += date_obj['count']
date_obj['count'] = count
resizes = Resize.objects.filter(instance__boot_volume__operating_system=
operating_system)
resize_list = [
resize.expired() for resize in resizes if resize.expired()]
downsized = [
{'date': expiry, 'count': -1} for expiry in sorted(resize_list)]
supersized = [
{'date': resize.requested, 'count': 1} for resize in resizes]
resize_count = sorted(downsized + supersized, key=itemgetter('date'))
count = 0
for date_obj in resize_count:
count += date_obj['count']
date_obj['count'] = count
return {'vm_count': vm_count,
'error_dates': error_dates,
'resizes': resize_count}
| 41.01005 | 92 | 0.638892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,179 | 0.144468 |
7df1d68440ff66d19aea439b6b82acb020e94a8f | 365 | py | Python | scripts/af_renameSG.py | aaronfang/small-Scripts | 890b10ab19fa9cdf2415aaf2dc08b81cc64fc79d | [
"MIT"
]
| 1 | 2018-03-08T16:34:00.000Z | 2018-03-08T16:34:00.000Z | scripts/af_renameSG.py | aaronfang/personal_scripts | 890b10ab19fa9cdf2415aaf2dc08b81cc64fc79d | [
"MIT"
]
| null | null | null | scripts/af_renameSG.py | aaronfang/personal_scripts | 890b10ab19fa9cdf2415aaf2dc08b81cc64fc79d | [
"MIT"
]
| null | null | null | # rename each shading group to match its material name, ending with "SG"
import pymel.core as pm
import re
selSG = pm.ls(sl=True,fl=True)
for SG in selSG:
curMat = pm.listConnections(SG,d=1)
for mat in curMat:
if pm.nodeType(mat) == 'blinn' or pm.nodeType(mat) == 'lambert':
sgNM = re.split("_mat",str(mat))[0]+"SG"
pm.rename(SG,sgNM)
| 33.181818 | 72 | 0.632877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.241096 |
8145be52efea74b03f5b43c1e53cd198f1584621 | 729 | py | Python | conftest.py | berpress/MT5WT | 10826f974cd5aef14e8771e18ca0314f27a902e3 | [
"Apache-2.0"
]
| null | null | null | conftest.py | berpress/MT5WT | 10826f974cd5aef14e8771e18ca0314f27a902e3 | [
"Apache-2.0"
]
| 1 | 2019-11-26T18:12:24.000Z | 2019-11-26T18:12:24.000Z | conftest.py | berpress/MT5WT | 10826f974cd5aef14e8771e18ca0314f27a902e3 | [
"Apache-2.0"
]
| null | null | null | import pytest
from common.common import NETTING_ACCOUNT
from fixture.application import Application
@pytest.fixture(scope="session")
def app(request):
base_url = request.config.getoption("--base_url")
fixture = Application(base_url)
fixture.wd.maximize_window()
fixture.wd.implicitly_wait(10)
yield fixture
fixture.destroy()
def pytest_addoption(parser):
parser.addoption(
"--base_url",
action="store",
default="https://trade.mql5.com/trade",
help="base_url",
)
@pytest.fixture()
def auth_netting(app):
if not app.auth.is_auth(NETTING_ACCOUNT.login):
app.auth.open()
app.auth.auth_terminal(NETTING_ACCOUNT.login, NETTING_ACCOUNT.password)
| 24.3 | 79 | 0.699588 | 0 | 0 | 215 | 0.294925 | 444 | 0.609053 | 0 | 0 | 80 | 0.109739 |
81473d4251ec4022292a6f33a138c31e6bf74550 | 3,855 | py | Python | chapps/tests/test_util/test_util.py | easydns/chapps | 3bb54e795de1f91c6cf749f9aeb16b1211584809 | [
"MIT"
]
| 1 | 2022-03-23T13:36:43.000Z | 2022-03-23T13:36:43.000Z | chapps/tests/test_util/test_util.py | easydns/chapps | 3bb54e795de1f91c6cf749f9aeb16b1211584809 | [
"MIT"
]
| 2 | 2022-02-25T18:12:15.000Z | 2022-03-07T22:32:13.000Z | chapps/tests/test_util/test_util.py | easydns/chapps | 3bb54e795de1f91c6cf749f9aeb16b1211584809 | [
"MIT"
]
| null | null | null | """CHAPPS Utilities Tests
.. todo::
Write tests for :class:`~chapps.util.VenvDetector`
"""
import pytest
from pprint import pprint as ppr
from chapps.util import AttrDict, PostfixPolicyRequest
pytestmark = pytest.mark.order(1)
class Test_AttrDict:
def test_attr_dict_return_int(self, mock_config_dict):
ad = AttrDict(mock_config_dict)
assert ad.intval == int(mock_config_dict["intval"])
def test_attr_dict_return_float(self, mock_config_dict):
ad = AttrDict(mock_config_dict)
assert ad.floatval == float(mock_config_dict["floatval"])
def test_attr_dict_return_string(self, mock_config_dict):
ad = AttrDict(mock_config_dict)
assert ad.stringval == mock_config_dict["stringval"]
def test_return_boolean(self, mock_config_dict):
ad = AttrDict(mock_config_dict)
assert ad.boolean == bool(mock_config_dict["boolean"])
class Test_PostfixPolicyRequest:
def test_instantiate_ppr(self, postfix_policy_request_message):
"""
:GIVEN: a policy data payload from Postfix
:WHEN: a new ppr object is instantiated from it
:THEN: a new ppr object should be returned containing a copy of that data
"""
pprp = postfix_policy_request_message()
new_ppr = PostfixPolicyRequest(pprp)
for i, l in enumerate(new_ppr._payload):
assert l == pprp[i]
def test_attribute(self, postfix_policy_request_message):
"""
:GIVEN: a ppr object with contents
:WHEN: an attribute is requested
:THEN: its value (from the payload) should be returned
"""
pprp = postfix_policy_request_message()
new_ppr = PostfixPolicyRequest(pprp)
for k, v in [l.split("=") for l in pprp[0:-2]]:
assert getattr(new_ppr, k, None) == v
def test_dereference(self, postfix_policy_request_message):
"""
:GIVEN: a ppr object with contents
:WHEN: an attribute is dereferenced
:THEN: its value (from the payload) should be returned
"""
pprp = postfix_policy_request_message()
new_ppr = PostfixPolicyRequest(pprp)
for k, v in [l.split("=") for l in pprp[0:-2]]:
assert new_ppr[k] == v
def test_iterable(self, postfix_policy_request_message):
"""
:GIVEN: a ppr object with contents
:WHEN: an iterable is requested (as with items())
:THEN: a dict-iterator should be returned, containing the payload data
"""
pprp = postfix_policy_request_message()
new_ppr = PostfixPolicyRequest(pprp)
for k, v in new_ppr.items():
assert f"{k}={v}" in pprp
def test_len(self, postfix_policy_request_message):
""":GIVEN: a ppr object with contents
:WHEN: asked for length
:THEN: the number of parameters from the payload should be returned
:NB: (the payload ends with an extra blank line)
"""
pprp = postfix_policy_request_message()
new_ppr = PostfixPolicyRequest(pprp)
assert len(new_ppr) == len([l for l in pprp if len(l) > 0])
def test_recipients(self, postfix_policy_request_message):
""":GIVEN: a PPR w/ more than one recipient listed
:WHEN: the pseudo-attribute `recipients` is accessed
:THEN: a list should be returned with one element per recipient
"""
new_ppr = PostfixPolicyRequest(
postfix_policy_request_message(
"[email protected]",
[
"[email protected]",
"[email protected]",
"[email protected]",
],
)
)
r = new_ppr.recipients
assert type(r) == list
assert len(r) == 3
assert r[0] == "[email protected]"
| 33.521739 | 82 | 0.628275 | 3,616 | 0.938003 | 0 | 0 | 0 | 0 | 0 | 0 | 1,399 | 0.362905 |
8147a9961c8020efb6e931a049f2c9b13c430d08 | 84 | py | Python | PMMH/apps/game/map/admin.py | metinberkkaratas/ProjectMagic-MightofHeroes | 578697e637aba0f18b4f83762bf1c87fb20db2ee | [
"MIT"
]
| null | null | null | PMMH/apps/game/map/admin.py | metinberkkaratas/ProjectMagic-MightofHeroes | 578697e637aba0f18b4f83762bf1c87fb20db2ee | [
"MIT"
]
| 4 | 2021-03-19T02:37:45.000Z | 2022-02-10T11:18:04.000Z | PMMH/apps/game/map/admin.py | metinberkkaratas/ProjectMagic-MightofHeroes | 578697e637aba0f18b4f83762bf1c87fb20db2ee | [
"MIT"
]
| 1 | 2019-10-21T20:32:20.000Z | 2019-10-21T20:32:20.000Z | from django.contrib import admin
from .models import Map
admin.site.register(Map)
| 14 | 32 | 0.797619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8148c634d7eb81e51ee23984bd4ad754b8ff54d8 | 816 | py | Python | models/__init__.py | pgodet/star_flow | cedb96ff339d11abf71d12d09e794593a742ccce | [
"Apache-2.0"
]
| 10 | 2020-11-17T12:55:00.000Z | 2022-01-13T07:23:55.000Z | models/__init__.py | pgodet/star_flow | cedb96ff339d11abf71d12d09e794593a742ccce | [
"Apache-2.0"
]
| 1 | 2021-01-02T22:46:07.000Z | 2021-01-02T22:46:07.000Z | models/__init__.py | pgodet/star_flow | cedb96ff339d11abf71d12d09e794593a742ccce | [
"Apache-2.0"
]
| 1 | 2021-01-26T10:53:02.000Z | 2021-01-26T10:53:02.000Z | from . import pwcnet
from . import pwcnet_irr
from . import pwcnet_occ_joint
from . import pwcnet_irr_occ_joint
from . import tr_flow
from . import tr_features
from . import IRR_PWC
from . import IRR_PWC_occ_joint
from . import STAR
PWCNet = pwcnet.PWCNet
PWCNet_irr = pwcnet_irr.PWCNet
PWCNet_occ_joint = pwcnet_occ_joint.PWCNet
PWCNet_irr_occ_joint = pwcnet_irr_occ_joint.PWCNet
TRFlow = tr_flow.TRFlow
TRFlow_occjoint = tr_flow.TRFlow_occjoint
TRFlow_irr = tr_flow.TRFlow_irr
TRFlow_irr_occjoint = tr_flow.TRFlow_irr_occjoint
TRFeat = tr_features.TRFeat
TRFeat_occjoint = tr_features.TRFeat_occjoint
TRFeat_irr_occjoint = tr_features.TRFeat_irr_occjoint
# -- With refinement ---
IRR_PWC = IRR_PWC.PWCNet
IRR_occ_joint = IRR_PWC_occ_joint.PWCNet
StarFlow = STAR.StarFlow
| 24 | 53 | 0.792892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.029412 |
81497b94baefb829f896f3159d41b2c5f1e8542b | 2,386 | py | Python | blkdiscovery/blkid.py | jaredeh/blkdiscovery | b2a73f12adfe784157783d94dbdb17a79810a680 | [
"MIT"
]
| null | null | null | blkdiscovery/blkid.py | jaredeh/blkdiscovery | b2a73f12adfe784157783d94dbdb17a79810a680 | [
"MIT"
]
| null | null | null | blkdiscovery/blkid.py | jaredeh/blkdiscovery | b2a73f12adfe784157783d94dbdb17a79810a680 | [
"MIT"
]
| null | null | null | import os
import re
#hack for python2 support
try:
from .blkdiscoveryutil import *
except:
from blkdiscoveryutil import *
class Blkid(BlkDiscoveryUtil):
def parse_line(self,line):
details = {}
diskline = line.split(':',1)
if len(diskline) < 2:
return
path = diskline[0]
        for match in re.finditer(r'(\S+)="([^"]+)"', diskline[1]):
details[match.group(1)] = match.group(2)
return path, details
def find_disks(self,output):
disklist = []
blockdevices = []
for disk in os.listdir("/sys/block"):
blockdevices.append('/dev/' + disk)
for path, details in output.items():
if path in blockdevices:
disklist.append(path)
continue
            m1 = re.search(r'(p\d+$)', path)
            m2 = re.search(r'(\d+$)', path)
if not m2:
disklist.append(path)
continue
if m1:
match = m1
else:
match = m2
disk = path.rsplit(match.group(1))[0]
if disk in disklist:
continue
if not disk in blockdevices:
continue
disklist.append(disk)
return disklist
def details(self):
retval = {}
rawdata = self.call_blkid()
disklist = self.find_disks(rawdata)
#we need to call blkid with a disk to get the partition info, weird
for path in disklist:
output = self.call_blkid(path)
if not output.get(path):
continue
retval[path] = output[path]
return retval
def call_blkid(self,device=None):
retval = {}
self.subprocess_check_output(["blkid", '-g'])
cmdarray = ["blkid", '-o', 'full']
if device:
cmdarray.append(device)
rawoutput = self.subprocess_check_output(cmdarray)
for line in rawoutput.splitlines():
            parsed = self.parse_line(line)
            if parsed is None:
                # parse_line() returns None for blank or unparseable lines; skip them
                continue
            path, details = parsed
            retval[path] = details
return self.stringify(retval)
if __name__ == '__main__':
import pprint
pp = pprint.PrettyPrinter(indent=4)
l = Blkid()
devdata = l.call_blkid()
pp.pprint(devdata)
disks = l.find_disks(devdata)
pp.pprint(disks)
details = l.details()
pp.pprint(details)
| 29.45679 | 75 | 0.538139 | 1,996 | 0.836547 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.079631 |
81498134e7422fff5c181158b8b48f05fe97fab6 | 6,217 | py | Python | cosa/analyzers/bmc_ltl.py | zsisco/CoSA | b7a5107fcbae9b3ed3726fbcf9240b39252ef551 | [
"BSD-3-Clause"
]
| 52 | 2018-02-26T19:01:03.000Z | 2022-02-24T08:30:00.000Z | cosa/analyzers/bmc_ltl.py | zsisco/CoSA | b7a5107fcbae9b3ed3726fbcf9240b39252ef551 | [
"BSD-3-Clause"
]
| 28 | 2018-06-07T22:18:23.000Z | 2020-10-27T01:21:01.000Z | cosa/analyzers/bmc_ltl.py | zsisco/CoSA | b7a5107fcbae9b3ed3726fbcf9240b39252ef551 | [
"BSD-3-Clause"
]
| 12 | 2018-08-16T20:02:46.000Z | 2021-01-20T18:17:45.000Z | # Copyright 2018 Cristian Mattarei
#
# Licensed under the modified BSD (3-clause BSD) License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pysmt.shortcuts import And, Or, Solver, TRUE, FALSE, Not, EqualsOrIff, Implies, Iff, Symbol, BOOL
from cosa.utils.logger import Logger
from cosa.utils.formula_mngm import substitute, get_free_variables
from cosa.representation import TS
from cosa.encoders.ltl import LTLEncoder, verification_type
from cosa.problem import VerificationStatus, VerificationType
from cosa.analyzers.mcsolver import TraceSolver, BMCSolver, VerificationStrategy
from cosa.analyzers.bmc_temporal import BMCTemporal
from cosa.analyzers.bmc_safety import BMCSafety
class BMCLTL(BMCTemporal, BMCSafety):
hts = None
config = None
TraceID = 0
total_time = 0.0
tracefile = None
def __init__(self, hts, config):
BMCSolver.__init__(self, hts, config)
self.enc = LTLEncoder()
def unroll(self, trans, invar, k_end, k_start=0, gen_list=False):
Logger.log("Unroll from %s to %s"%(k_start, k_end), 2)
fwd = k_start <= k_end
time_function = self.at_time if fwd else self.at_ptime
(k_start, k_end) = (min(k_start, k_end), max(k_start, k_end))
formula = []
t = k_start
while t < k_end:
to_t = t+1 if fwd else t
formula.append(time_function(trans, t))
formula.append(time_function(invar, to_t))
Logger.log("Add trans, k=%s"%t, 2)
t += 1
if gen_list:
return formula
return And(formula)
def _init_v_time(self, vars, k):
self.vars_time = []
for t in range(k+1):
vars_at_t = []
for v in vars:
vars_at_t.append((v, TS.get_timed_name(v, t)))
self.vars_time.append((t, dict(vars_at_t)))
self.vars_time = dict(self.vars_time)
def ltl(self, prop, k, k_min=0):
if self.config.strategy != VerificationStrategy.LTL:
(vtype, prop) = verification_type(self.enc.to_nnf(prop))
if vtype == VerificationType.SAFETY:
return self.safety(prop, k, k_min)
if vtype == VerificationType.LIVENESS:
return self.liveness(prop, k, k_min)
if vtype == VerificationType.EVENTUALLY:
return self.eventually(prop, k, k_min)
return self.ltl_generic(prop, k, k_min)
def ltl_generic(self, prop, k, k_min=0):
lemmas = self.hts.lemmas
self._init_at_time(self.hts.vars, k)
self._init_v_time(self.hts.vars, k)
(t, model) = self.solve(self.hts, prop, k, lemmas)
if model == True:
return (VerificationStatus.TRUE, None, t)
elif model is not None:
model = self._remap_model(self.hts.vars, model, t)
trace = self.generate_trace(model, t, get_free_variables(prop), find_loop=True)
return (VerificationStatus.FALSE, trace, t)
else:
return (VerificationStatus.UNK, None, t)
def solve(self, hts, prop, k, lemmas=None):
if lemmas is not None:
(hts, res) = self.add_lemmas(hts, prop, lemmas)
if res:
Logger.log("Lemmas imply the property", 1)
Logger.log("", 0, not(Logger.level(1)))
return (0, True)
hts.reset_formulae()
return self.solve_inc(hts, prop, k)
def all_simple_loopbacks(self, vars, k):
lvars = list(vars)
vars_k = [TS.get_timed(v, k) for v in lvars]
loopback = []
eqvar = None
heqvars = None
peqvars = FALSE()
for i in range(k):
vars_i = [TS.get_timed(v, i) for v in lvars]
eq_k_i = And([EqualsOrIff(vars_k[j], vars_i[j]) for j in range(len(lvars))])
loopback.append(eq_k_i)
loopback.append(FALSE())
return loopback
def solve_inc(self, hts, prop, k, all_vars=True):
if all_vars:
relevant_vars = hts.vars
else:
relevant_vars = hts.state_vars | hts.input_vars | hts.output_vars
init = hts.single_init()
trans = hts.single_trans()
invar = hts.single_invar()
init = And(init, invar)
init_0 = self.at_time(init, 0)
nprop = self.enc.to_nnf(Not(prop))
self._reset_assertions(self.solver)
self._add_assertion(self.solver, init_0)
for t in range(1, k+1, 1):
trans_t = self.unroll(trans, invar, t)
self._add_assertion(self.solver, trans_t)
lb = self.all_simple_loopbacks(relevant_vars, t)
self._push(self.solver)
self._push(self.solver)
nprop_k = self.enc.encode(nprop, 0, t)
self._add_assertion(self.solver, And(nprop_k, Not(Or(lb))))
if self._solve(self.solver):
Logger.log("Counterexample (no-loop) found with k=%s"%(t), 1)
model = self._get_model(self.solver)
return (t, model)
nltlprop = []
self._pop(self.solver)
for l in range(t+1):
nprop_l = self.enc.encode_l(nprop, 0, t, l)
nltlprop.append(And(lb[l], nprop_l))
self._add_assertion(self.solver, Or(nltlprop))
if self._solve(self.solver):
Logger.log("Counterexample (with-loop) found with k=%s"%(t), 1)
model = self._get_model(self.solver)
return (t, model)
else:
Logger.log("No counterexample found with k=%s"%(t), 1)
Logger.msg(".", 0, not(Logger.level(1)))
self._pop(self.solver)
return (k-1, None)
| 32.212435 | 102 | 0.579218 | 5,238 | 0.842529 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.095866 |
81499daf35ebda15aaf719c6405036c7a52b937c | 4,623 | py | Python | paleomix/nodes/bowtie2.py | MikkelSchubert/paleomix | 5c6414060088ba178ff1c400bdbd45d2f6b1aded | [
"MIT"
]
| 33 | 2015-04-08T10:44:19.000Z | 2021-11-01T14:23:40.000Z | paleomix/nodes/bowtie2.py | MikkelSchubert/paleomix | 5c6414060088ba178ff1c400bdbd45d2f6b1aded | [
"MIT"
]
| 41 | 2015-07-17T12:46:16.000Z | 2021-10-13T06:47:25.000Z | paleomix/nodes/bowtie2.py | MikkelSchubert/paleomix | 5c6414060088ba178ff1c400bdbd45d2f6b1aded | [
"MIT"
]
| 19 | 2015-01-23T07:09:39.000Z | 2021-04-06T09:30:21.000Z | #!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import Any, Iterable, Optional, Type, Union
import paleomix.common.versions as versions
from paleomix.common.command import (
AtomicCmd,
InputFile,
OptionsType,
OutputFile,
ParallelCmds,
TempOutputFile,
)
from paleomix.node import CommandNode, Node, NodeError
from paleomix.nodes.bwa import (
_get_max_threads,
_get_node_description,
_new_cleanup_command,
)
BOWTIE2_VERSION = versions.Requirement(
call=("bowtie2", "--version"),
regexp=r"version (\d+\.\d+\.\d+)",
specifiers=">=2.3.0",
)
class Bowtie2IndexNode(CommandNode):
def __init__(self, input_file: str, dependencies: Iterable[Node] = ()):
command = _bowtie2_template(
(
"bowtie2-build",
InputFile(input_file),
TempOutputFile(input_file),
),
reference=input_file,
iotype=OutputFile,
)
CommandNode.__init__(
self,
command=command,
description="creating Bowtie2 index for %s" % (input_file,),
dependencies=dependencies,
)
class Bowtie2Node(CommandNode):
def __init__(
self,
input_file_1: str,
input_file_2: Optional[str],
output_file: str,
reference: str,
threads: int = 2,
mapping_options: OptionsType = {},
cleanup_options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
aln = _bowtie2_template(
["bowtie2"],
reference=reference,
stdout=AtomicCmd.PIPE,
)
fixed_options: OptionsType = {
"--threads": _get_max_threads(reference, threads),
"-x": reference,
}
if input_file_1 and not input_file_2:
fixed_options["-U"] = input_file_1
elif input_file_1 and input_file_2:
fixed_options["-1"] = input_file_1
fixed_options["-2"] = input_file_2
else:
raise NodeError(
"Input 1, OR both input 1 and input 2 must "
"be specified for Bowtie2 node"
)
aln.merge_options(
user_options=mapping_options,
fixed_options=fixed_options,
)
cleanup = _new_cleanup_command(
stdin=aln,
in_reference=reference,
out_bam=output_file,
max_threads=fixed_options["--threads"],
paired_end=input_file_1 and input_file_2,
options=cleanup_options,
)
description = _get_node_description(
name="Bowtie2",
input_files_1=input_file_1,
input_files_2=input_file_2,
reference=reference,
)
CommandNode.__init__(
self,
command=ParallelCmds([aln, cleanup]),
description=description,
threads=threads,
dependencies=dependencies,
)
def _bowtie2_template(
call: Any,
reference: str,
iotype: Union[Type[InputFile], Type[OutputFile]] = InputFile,
**kwargs: Any
):
return AtomicCmd(
call,
extra_files=[
iotype(reference + postfix)
for postfix in (
".1.bt2",
".2.bt2",
".3.bt2",
".4.bt2",
".rev.1.bt2",
".rev.2.bt2",
)
],
requirements=[BOWTIE2_VERSION],
**kwargs
)
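# Illustrative wiring sketch (file names and thread count are assumptions, not
# part of this module): build the index once, then map a paired-end library
# against the same reference.
#
#   index = Bowtie2IndexNode(input_file="ref.fasta")
#   mapping = Bowtie2Node(
#       input_file_1="reads_1.fq.gz",
#       input_file_2="reads_2.fq.gz",
#       output_file="sample.bam",
#       reference="ref.fasta",
#       threads=4,
#       dependencies=(index,),
#   )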
| 30.019481 | 79 | 0.605667 | 2,402 | 0.519576 | 0 | 0 | 0 | 0 | 0 | 0 | 1,405 | 0.303915 |
8149af93bae69dfead5a5f9ae4e810455dcc5116 | 976 | py | Python | modu_01/04_02_lab.py | 94JuHo/study_for_deeplearning | ababf482b6a24d94b5f860ea9a68e34fe324d182 | [
"MIT"
]
| null | null | null | modu_01/04_02_lab.py | 94JuHo/study_for_deeplearning | ababf482b6a24d94b5f860ea9a68e34fe324d182 | [
"MIT"
]
| null | null | null | modu_01/04_02_lab.py | 94JuHo/study_for_deeplearning | ababf482b6a24d94b5f860ea9a68e34fe324d182 | [
"MIT"
]
| null | null | null | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # code to suppress the errors that show up on my MacBook
import tensorflow as tf
#using matrix
x_data = [[73., 80., 75.], [93., 88., 93.,], [89., 91., 90.], [96., 98., 100.], [73., 66., 70.]]
y_data = [[152.], [185.], [180.], [196.], [142.]]
X = tf.placeholder(tf.float32, shape=[None, 3])  # n data samples will come in; TensorFlow expresses that unknown count as None.
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.matmul(X, W)+b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
if step % 10 == 0:
print(step, "Cost:", cost_val, "\nPrediction:\n", hy_val)
| 32.533333 | 96 | 0.655738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.214962 |
814a3026ad57c4136a0d1ae0e01b8c8cbeaa23f5 | 3,291 | py | Python | freezer/storage/fslike.py | kwu83tw/freezer | 5aaab1e70bf957436d888fbc3fce7af8d25eb124 | [
"MIT"
]
| 141 | 2015-10-18T02:53:47.000Z | 2022-03-10T11:31:30.000Z | freezer/storage/fslike.py | kwu83tw/freezer | 5aaab1e70bf957436d888fbc3fce7af8d25eb124 | [
"MIT"
]
| 1 | 2016-10-31T01:56:10.000Z | 2016-10-31T01:56:10.000Z | freezer/storage/fslike.py | kwu83tw/freezer | 5aaab1e70bf957436d888fbc3fce7af8d25eb124 | [
"MIT"
]
| 50 | 2015-10-27T12:16:08.000Z | 2022-02-14T07:14:01.000Z | # (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_serialization import jsonutils as json
from freezer.storage import physical
class FsLikeStorage(physical.PhysicalStorage, metaclass=abc.ABCMeta):
_type = 'fslike'
def __init__(self, storage_path,
max_segment_size, skip_prepare=False):
super(FsLikeStorage, self).__init__(
storage_path=storage_path,
max_segment_size=max_segment_size,
skip_prepare=skip_prepare)
def prepare(self):
self.create_dirs(self.storage_path)
def info(self):
pass
def write_backup(self, rich_queue, backup):
"""
Stores backup in storage
:type rich_queue: freezer.utils.streaming.RichQueue
:type backup: freezer.storage.base.Backup
"""
backup = backup.copy(storage=self)
path = backup.data_path
self.create_dirs(path.rsplit('/', 1)[0])
with self.open(path, mode='wb') as \
b_file:
for message in rich_queue.get_messages():
b_file.write(message)
def backup_blocks(self, backup):
"""
:param backup:
:type backup: freezer.storage.base.Backup
:return:
"""
with self.open(backup.data_path, 'rb') as backup_file:
while True:
chunk = backup_file.read(self.max_segment_size)
if len(chunk):
yield chunk
else:
break
@abc.abstractmethod
def open(self, filename, mode):
"""
:type filename: str
:param filename:
:type mode: str
:param mode:
:return:
"""
pass
def add_stream(self, stream, package_name, headers=None):
"""
:param stream: data
:param package_name: path
:param headers: backup metadata information
:return:
"""
split = package_name.rsplit('/', 1)
# create backup_basedir
backup_basedir = "{0}/{1}".format(self.storage_path,
package_name)
self.create_dirs(backup_basedir)
# define backup_data_name
backup_basepath = "{0}/{1}".format(backup_basedir,
split[0])
backup_metadata = "%s/metadata" % backup_basedir
# write backup to backup_basepath
with self.open(backup_basepath, 'wb') as backup_file:
for el in stream:
backup_file.write(el)
# write data matadata to backup_metadata
with self.open(backup_metadata, 'wb') as backup_meta:
backup_meta.write(json.dumps(headers))
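# Illustrative sketch (an assumption, not taken from freezer itself): a concrete
# storage class derived from FsLikeStorage chiefly has to provide `open`, e.g. a
# plain local-filesystem variant could delegate to the builtin:
#
#   class LocalFsStorage(FsLikeStorage):
#       def open(self, filename, mode):
#           return open(filename, mode)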
| 32.264706 | 74 | 0.606806 | 2,575 | 0.782437 | 409 | 0.124278 | 207 | 0.062899 | 0 | 0 | 1,322 | 0.401702 |
814afb82812c77d3cd59d4373c1636829f4ef2dc | 1,558 | py | Python | rubicon/repository/asynchronous/s3.py | gforsyth/rubicon | b77c0bdfc73d3f5666f76c83240706c10cd2e88c | [
"Apache-2.0"
]
| null | null | null | rubicon/repository/asynchronous/s3.py | gforsyth/rubicon | b77c0bdfc73d3f5666f76c83240706c10cd2e88c | [
"Apache-2.0"
]
| null | null | null | rubicon/repository/asynchronous/s3.py | gforsyth/rubicon | b77c0bdfc73d3f5666f76c83240706c10cd2e88c | [
"Apache-2.0"
]
| null | null | null | from rubicon.repository.asynchronous import AsynchronousBaseRepository
from rubicon.repository.utils import json
class S3Repository(AsynchronousBaseRepository):
"""The asynchronous S3 repository uses `asyncio` to
persist Rubicon data to a remote S3 bucket.
S3 credentials can be specified via environment variables
or the credentials file in '~/.aws'.
Parameters
----------
root_dir : str
The full S3 path (including 's3://') to persist
Rubicon data to.
loop : asyncio.unix_events._UnixSelectorEventLoop, optional
The event loop the asynchronous calling program is running on.
It should not be necessary to provide this parameter in
standard asynchronous operating cases.
"""
PROTOCOL = "s3"
async def _connect(self):
"""Asynchronously connect to the underlying S3 persistence layer.
Note
----
This function must be run before any other that reaches
out to S3. It is implicitly called by such functions.
"""
await self.filesystem._connect()
async def _persist_bytes(self, bytes_data, path):
"""Asynchronously persists the raw bytes `bytes_data`
to the S3 bucket defined by `path`.
"""
await self.filesystem._pipe_file(path, bytes_data)
async def _persist_domain(self, domain, path):
"""Asynchronously persists the Rubicon object `domain`
to the S3 bucket defined by `path`.
"""
await self.filesystem._pipe_file(path, json.dumps(domain))
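# Illustrative usage sketch (bucket path and coroutine are assumptions, not part
# of rubicon): the repository methods are awaited from an asyncio event loop.
#
#   import asyncio
#
#   async def persist_example():
#       repo = S3Repository(root_dir="s3://my-bucket/rubicon-root")
#       await repo._persist_bytes(b"hello", "s3://my-bucket/rubicon-root/blob.bin")
#
#   asyncio.run(persist_example())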
| 33.869565 | 73 | 0.67715 | 1,442 | 0.925546 | 0 | 0 | 0 | 0 | 763 | 0.48973 | 1,041 | 0.668164 |
814b91426dfa04f0937c2eaf434296d7b486ca56 | 1,660 | py | Python | examples/multidata_example.py | zssherman/ACT | db87008aa6649d3d21b79ae97ea0f11d7f1f1935 | [
"BSD-3-Clause"
]
| 62 | 2020-01-13T19:48:49.000Z | 2022-03-22T07:56:37.000Z | examples/multidata_example.py | zssherman/ACT | db87008aa6649d3d21b79ae97ea0f11d7f1f1935 | [
"BSD-3-Clause"
]
| 215 | 2020-01-07T20:17:11.000Z | 2022-03-31T18:49:57.000Z | examples/multidata_example.py | zssherman/ACT | db87008aa6649d3d21b79ae97ea0f11d7f1f1935 | [
"BSD-3-Clause"
]
| 16 | 2020-01-13T21:25:55.000Z | 2022-03-26T18:01:29.000Z | """
==================================================
Example on how to plot multiple datasets at a time
==================================================
This is an example of how to download and
plot multiple datasets at a time.
.. image:: ../../multi_ds_plot1.png
"""
import act
import matplotlib.pyplot as plt
# Place your username and token here
username = ''
token = ''
act.discovery.download_data(username, token, 'sgpceilC1.b1', '2019-01-01', '2019-01-07')
# Read in CEIL data and correct it
ceil_ds = act.io.armfiles.read_netcdf('sgpceilC1.b1/sgpceilC1.b1.201901*.nc')
ceil_ds = act.corrections.ceil.correct_ceil(ceil_ds, -9999.)
# Read in the MET data
met_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_MET_WILDCARD)
# You can use tuples if the datasets in the tuple contain a
# datastream attribute. This is required in all ARM datasets.
display = act.plotting.TimeSeriesDisplay(
(ceil_ds, met_ds), subplot_shape=(2, ), figsize=(15, 10))
display.plot('backscatter', 'sgpceilC1.b1', subplot_index=(0, ))
display.plot('temp_mean', 'sgpmetE13.b1', subplot_index=(1, ))
display.day_night_background('sgpmetE13.b1', subplot_index=(1, ))
plt.show()
# You can also use a dictionary so that you can customize
# your datastream names to something that may be more useful.
display = act.plotting.TimeSeriesDisplay(
{'ceiliometer': ceil_ds, 'met': met_ds},
subplot_shape=(2, ), figsize=(15, 10))
display.plot('backscatter', 'ceiliometer', subplot_index=(0, ))
display.plot('temp_mean', 'met', subplot_index=(1, ))
display.day_night_background('met', subplot_index=(1, ))
plt.show()
ceil_ds.close()
met_ds.close()
| 32.54902 | 88 | 0.689759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.491566 |
814bbe8913aa4c1ed64cfd661e62c150faebc750 | 805 | py | Python | gpdata.py | masenov/bullet | be7148c93e3bf8111923063b599f2e9f7ea929b8 | [
"Zlib"
]
| null | null | null | gpdata.py | masenov/bullet | be7148c93e3bf8111923063b599f2e9f7ea929b8 | [
"Zlib"
]
| null | null | null | gpdata.py | masenov/bullet | be7148c93e3bf8111923063b599f2e9f7ea929b8 | [
"Zlib"
]
| null | null | null | flat_x = x.flatten()
flat_y = y.flatten()
flat_z = z.flatten()
size = flat_x.shape[0]
filename = 'landscapeData.h'
open(filename, 'w').close()
f = open(filename, 'a')
f.write('#include "LinearMath/btScalar.h"\n#define Landscape01VtxCount 4\n#define Landscape01IdxCount 4\nbtScalar Landscape01Vtx[] = {\n')
for i in range(size):
f.write(str(flat_x[i])+'f,'+str(flat_y[i])+'f,'+str(flat_z[i])+'f,\n')
f.write('};\n')
f.write('btScalar Landscape01Nml[] = {\n')
for i in range(size):
f.write('1.0f,1.0f,1.0f,\n')
f.write('};\n')
f.write('btScalar Landscape01Tex[] = {\n')
for i in range(size):
f.write('1.0f,1.0f,1.0f,\n')
f.write('};\n')
f.write('unsigned short Landscape01Idx[] = {\n')
for i in range(size):
f.write(str(i)+','+str(i+1)+','+str(i+2)+',\n')
f.write('};\n')
f.close()
| 23.676471 | 138 | 0.62236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.427329 |
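gpdata.py above assumes that x, y and z already exist as 2-D arrays when it runs. A hedged sketch of one way those grids could be produced first (numpy, the 64x64 resolution and the bump-shaped height field are assumptions, not part of the original script):

import numpy as np

# Simple height field on a 64x64 grid; flatten() in the script then yields the vertex lists.
xs = np.linspace(-10.0, 10.0, 64)
ys = np.linspace(-10.0, 10.0, 64)
x, y = np.meshgrid(xs, ys)
z = 2.0 * np.exp(-(x ** 2 + y ** 2) / 25.0)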
814d356177b5fffd6e85621ee2f5863452f63451 | 2,776 | py | Python | samples/create_project.py | zuarbase/server-client-python | 1e5e02a550727d72fa90c3d8e4caa4c5f416dc74 | [
"CC0-1.0",
"MIT"
]
| 470 | 2016-09-14T23:38:48.000Z | 2022-03-31T07:59:53.000Z | samples/create_project.py | zuarbase/server-client-python | 1e5e02a550727d72fa90c3d8e4caa4c5f416dc74 | [
"CC0-1.0",
"MIT"
]
| 772 | 2016-09-09T18:15:44.000Z | 2022-03-31T22:01:08.000Z | samples/create_project.py | zuarbase/server-client-python | 1e5e02a550727d72fa90c3d8e4caa4c5f416dc74 | [
"CC0-1.0",
"MIT"
]
| 346 | 2016-09-10T00:05:00.000Z | 2022-03-30T18:55:47.000Z | ####
# This script demonstrates how to use the Tableau Server Client
# to create new projects, both at the root level and how to nest them using
# parent_id.
#
#
# To run the script, you must have installed Python 3.6 or later.
####
import argparse
import logging
import sys
import tableauserverclient as TSC
def create_project(server, project_item):
try:
project_item = server.projects.create(project_item)
print('Created a new project called: %s' % project_item.name)
return project_item
except TSC.ServerResponseError:
print('We have already created this project: %s' % project_item.name)
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='Create new projects.')
# Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
parser.add_argument('--site', '-S', help='site name')
parser.add_argument('--token-name', '-p', required=True,
help='name of the personal access token used to sign into the server')
parser.add_argument('--token-value', '-v', required=True,
help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
# Options specific to this sample
# This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Use highest Server REST API version available
server.use_server_version()
# Without parent_id specified, projects are created at the top level.
top_level_project = TSC.ProjectItem(name='Top Level Project')
top_level_project = create_project(server, top_level_project)
        # Specifying parent_id creates a nested project.
child_project = TSC.ProjectItem(name='Child Project', parent_id=top_level_project.id)
child_project = create_project(server, child_project)
# Projects can be nested at any level.
grand_child_project = TSC.ProjectItem(name='Grand Child Project', parent_id=child_project.id)
grand_child_project = create_project(server, grand_child_project)
if __name__ == '__main__':
main()
| 40.823529 | 101 | 0.699568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,145 | 0.412464 |
814e51bb73ef3a0faf2172d4b70fb37c15405587 | 2,146 | py | Python | tests/test_threading.py | nmandery/rasterio | ba5e90c487bd1930f52e57dba999e889b4df9ade | [
"BSD-3-Clause"
]
| 1,479 | 2015-01-10T12:35:07.000Z | 2021-10-18T16:17:15.000Z | tests/test_threading.py | nmandery/rasterio | ba5e90c487bd1930f52e57dba999e889b4df9ade | [
"BSD-3-Clause"
]
| 1,819 | 2015-01-06T21:56:25.000Z | 2021-10-20T02:28:27.000Z | tests/test_threading.py | nmandery/rasterio | ba5e90c487bd1930f52e57dba999e889b4df9ade | [
"BSD-3-Clause"
]
| 509 | 2015-01-06T20:59:12.000Z | 2021-10-18T14:14:57.000Z | from threading import Thread
import time
import unittest
import rasterio as rio
from rasterio.env import get_gdal_config
class TestThreading(unittest.TestCase):
def test_multiopen(self):
"""
Open a file from different threads.
Regression test for issue #986
"""
def func(delay):
try:
with rio.open('tests/data/RGB.byte.tif'):
time.sleep(delay)
except Exception as err:
global exceptions
exceptions.append(err)
global exceptions
exceptions = []
t1 = Thread(target=func, args=(0.1,))
t2 = Thread(target=func, args=(0,))
with rio.Env():
t1.start()
t2.start() # potential error if Env manages globals unsafely
t1.join()
t2.join()
assert not exceptions
def test_reliability(self):
"""Allow for nondeterminism of race condition"""
for i in range(3):
self.test_multiopen()
def test_child_thread_inherits_env():
"""A new thread inherit's the main thread's env"""
def func():
with rio.Env(lol='wut'):
assert get_gdal_config('lol') == 'wut'
# The next config option will have been set in the main thread.
assert get_gdal_config('FROM_MAIN') is True
t1 = Thread(target=func)
with rio.Env(FROM_MAIN=True):
t1.start()
assert get_gdal_config('FROM_MAIN') is True
assert get_gdal_config('lol') is None
t1.join()
def test_child_thread_isolation():
"""Child threads have isolated environments"""
def func(key, value, other_key):
env = {key: value}
with rio.Env(**env):
assert get_gdal_config(key) == value
# The other key is one set in another child thread.
assert get_gdal_config(other_key) is None
t1 = Thread(target=func, args=('is_t1', True, 'is_t2'))
t2 = Thread(target=func, args=('is_t2', True, 'is_t1'))
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
unittest.main()
| 25.855422 | 75 | 0.584809 | 917 | 0.427307 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.238117 |
81507c4f325c3f7f550df8daa74e43be479e3fc4 | 4,600 | py | Python | dm_construction/environments_test.py | frangipane/dm_construction | c84dcbd13ef6896a57da04fe62be85297178552a | [
"Apache-2.0"
]
| 25 | 2020-07-16T12:35:07.000Z | 2022-03-25T11:02:54.000Z | dm_construction/environments_test.py | frangipane/dm_construction | c84dcbd13ef6896a57da04fe62be85297178552a | [
"Apache-2.0"
]
| 2 | 2021-01-11T11:40:21.000Z | 2021-06-15T12:43:28.000Z | dm_construction/environments_test.py | LaudateCorpus1/dm_construction | f9d59f6ccb8818b71f971387704f2db8f2b3323a | [
"Apache-2.0"
]
| 7 | 2020-08-20T13:04:37.000Z | 2021-11-19T18:55:09.000Z | #!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests the open source construction environments."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import dm_construction
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string("backend", "docker", "")
def _make_random_action(action_spec, observation):
"""Makes a random action given an action spec and observation."""
# Sample the random action.
action = {}
for name, spec in action_spec.items():
if name == "Index":
value = np.random.randint(observation["n_edge"])
elif spec.dtype in (np.int32, np.int64, int):
value = np.random.randint(spec.minimum, spec.maximum + 1)
else:
value = np.random.uniform(spec.minimum, spec.maximum)
action[name] = value
return action
def _random_unroll(env, seed=1234, num_steps=10, difficulty=5,
random_choice_before_reset=False):
"""Take random actions in the given environment."""
np.random.seed(seed)
action_spec = env.action_spec()
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
trajectory = [timestep]
actions = [None]
for _ in range(num_steps):
if timestep.last():
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
action = _make_random_action(action_spec, timestep.observation)
timestep = env.step(action)
trajectory.append(timestep)
actions.append(action)
return trajectory, actions
class TestEnvironments(parameterized.TestCase):
def _make_environment(
self, problem_type, curriculum_sample, wrapper_type, backend_type=None):
"""Make the new version of the construction task."""
if backend_type is None:
backend_type = FLAGS.backend
return dm_construction.get_environment(
problem_type,
unity_environment=self._unity_envs[backend_type],
wrapper_type=wrapper_type,
curriculum_sample=curriculum_sample)
@classmethod
def setUpClass(cls):
super(TestEnvironments, cls).setUpClass()
# Construct the unity environment.
cls._unity_envs = {
"docker": dm_construction.get_unity_environment("docker"),
}
@classmethod
def tearDownClass(cls):
super(TestEnvironments, cls).tearDownClass()
for env in cls._unity_envs.values():
env.close()
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_discrete_relative_environments_curriculum_sample(self, name):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, True, "discrete_relative")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_continuous_absolute_environments_curriculum_sample(self, name):
"""Smoke test for continuous absolute wrapper w/ curriculum_sample=True."""
env = self._make_environment(name, True, "continuous_absolute")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("connecting_additional_layer", "connecting", "additional_layer"),
("connecting_mixed_height_targets", "connecting", "mixed_height_targets"),
("silhouette_double_the_targets", "silhouette", "double_the_targets"),)
def test_generalization_modes(self, name, generalization_mode):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, False, "discrete_relative")
_random_unroll(env, difficulty=generalization_mode)
if __name__ == "__main__":
absltest.main()
| 35.658915 | 80 | 0.722609 | 2,391 | 0.519783 | 0 | 0 | 1,894 | 0.411739 | 0 | 0 | 1,677 | 0.364565 |
8150b4a9e126831d7b3a5289d0e53064e11cb629 | 225 | py | Python | Modulo_3/semana 2/imagenes/imagen.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
]
| null | null | null | Modulo_3/semana 2/imagenes/imagen.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
]
| null | null | null | Modulo_3/semana 2/imagenes/imagen.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
]
| null | null | null | from tkinter import *
ventana = Tk()
ventana.geometry("500x500")
ventana.title('PythonGuides')
img = PhotoImage(file='./logo.png')
img = img.subsample(3, 3)
Label( ventana, image=img ).pack(fill="both")
ventana.mainloop() | 18.75 | 45 | 0.711111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.182222 |
815225f9552d4d71ea93b38bd616e126479cf8c1 | 476 | py | Python | htdfsdk/utils.py | youngqqcn/htdfsdk | c22f213a967c8233bb6ccfb01bf148112efd44db | [
"MIT"
]
| 2 | 2021-01-21T01:46:29.000Z | 2021-03-12T05:59:19.000Z | htdfsdk/utils.py | youngqqcn/htdfsdk | c22f213a967c8233bb6ccfb01bf148112efd44db | [
"MIT"
]
| null | null | null | htdfsdk/utils.py | youngqqcn/htdfsdk | c22f213a967c8233bb6ccfb01bf148112efd44db | [
"MIT"
]
| null | null | null | #coding:utf8
#author: yqq
#date: 2020/12/15 5:38 PM
#descriptions:
from decimal import Decimal, getcontext
from typing import Union

# getcontext()


def htdf_to_satoshi(amount_htdf: Union[float, int, str]) -> int:
return int(Decimal(str(amount_htdf)) * (10 ** 8))
if __name__ == '__main__':
assert htdf_to_satoshi(139623.71827296) == 13962371827296
assert htdf_to_satoshi('139623.71827296') == 13962371827296
assert htdf_to_satoshi(13962371827296) == 13962371827296 * 10 ** 8
pass
| 21.636364 | 70 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.222917 |
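htdf_to_satoshi above scales through Decimal(str(...)) instead of multiplying the float directly; a small worked comparison of the two routes (values are illustrative):

from decimal import Decimal

amount = 139623.71827296

# Multiplying the binary float directly can round a hair below the intended integer
# and then truncate, depending on the value.
print(int(amount * 10 ** 8))
# The Decimal path keeps the decimal digits exact, matching the asserts above.
print(int(Decimal(str(amount)) * 10 ** 8))  # 13962371827296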
81523ae13c659215630baf70c984ec0ce5e2200e | 1,213 | py | Python | hanzi_font_deconstructor/scripts/create_training_data.py | chanind/hanzi-font-deconstructor | ce41b2a5c0e66b8a83d6c734678446d1d32a18b7 | [
"MIT"
]
| null | null | null | hanzi_font_deconstructor/scripts/create_training_data.py | chanind/hanzi-font-deconstructor | ce41b2a5c0e66b8a83d6c734678446d1d32a18b7 | [
"MIT"
]
| null | null | null | hanzi_font_deconstructor/scripts/create_training_data.py | chanind/hanzi-font-deconstructor | ce41b2a5c0e66b8a83d6c734678446d1d32a18b7 | [
"MIT"
]
| null | null | null | from dataclasses import asdict
from hanzi_font_deconstructor.common.generate_training_data import (
STROKE_VIEW_BOX,
get_training_input_svg_and_masks,
)
from os import path, makedirs
from pathlib import Path
import shutil
import argparse
PROJECT_ROOT = Path(__file__).parents[2]
DEST_FOLDER = PROJECT_ROOT / "data"
parser = argparse.ArgumentParser(
description="Generate training data for a model to deconstruct hanzi into strokes"
)
parser.add_argument("--max-strokes-per-img", default=5, type=int)
parser.add_argument("--total-images", default=50, type=int)
args = parser.parse_args()
if __name__ == "__main__":
# create and empty the dest folder
if path.exists(DEST_FOLDER):
shutil.rmtree(DEST_FOLDER)
makedirs(DEST_FOLDER)
makedirs(DEST_FOLDER / "sample_svgs")
# create the data
data = {
"viewbox": STROKE_VIEW_BOX,
"imgs": [],
}
for i in range(args.total_images):
(img_svg, stroke_masks) = get_training_input_svg_and_masks(256)
label = f"{i}-{len(stroke_masks)}"
with open(DEST_FOLDER / "sample_svgs" / f"{label}.svg", "w") as img_file:
img_file.write(img_svg)
print(".")
print("Done!")
| 29.585366 | 86 | 0.698269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.222589 |
815535942d00809101f7b9f361c4f256b557f56f | 1,321 | py | Python | examples/generated_sample_regression.py | micheleantonazzi/gibson-dataset | cb5fc81061bbda1a653d6fc7b625b14c8a517f3c | [
"MIT"
]
| 3 | 2021-10-31T17:43:50.000Z | 2022-03-21T08:55:01.000Z | examples/generated_sample_regression.py | micheleantonazzi/gibson-dataset | cb5fc81061bbda1a653d6fc7b625b14c8a517f3c | [
"MIT"
]
| null | null | null | examples/generated_sample_regression.py | micheleantonazzi/gibson-dataset | cb5fc81061bbda1a653d6fc7b625b14c8a517f3c | [
"MIT"
]
| null | null | null | from generic_dataset.data_pipeline import DataPipeline
from generic_dataset.generic_sample import synchronize_on_fields
from generic_dataset.sample_generator import SampleGenerator
import numpy as np
import generic_dataset.utilities.save_load_methods as slm
pipeline_rgb_to_gbr = DataPipeline().add_operation(lambda data, engine: (data[:, :, [2, 1, 0]], engine))
@synchronize_on_fields(field_names={'field_3'}, check_pipeline=False)
def field_3_is_positive(sample) -> bool:
return sample.get_field_3() > 0
# To model a regression problem, the label_set parameter must be empty
GeneratedSampleRegression = SampleGenerator(name='GeneratedSampleRegression', label_set=set()).add_dataset_field(field_name='rgb_image', field_type=np.ndarray, save_function=slm.save_compressed_numpy_array, load_function=slm.load_compressed_numpy_array) \
.add_dataset_field(field_name='bgr_image', field_type=np.ndarray, save_function=slm.save_cv2_image_bgr, load_function=slm.load_cv2_image_bgr) \
.add_field(field_name='field_3', field_type=int) \
.add_custom_pipeline(method_name='create_pipeline_convert_rgb_to_bgr', elaborated_field='rgb_image', final_field='bgr_image', pipeline=pipeline_rgb_to_gbr) \
.add_custom_method(method_name='field_3_is_positive', function=field_3_is_positive) \
.generate_sample_class() | 62.904762 | 255 | 0.824375 | 0 | 0 | 0 | 0 | 146 | 0.110522 | 0 | 0 | 212 | 0.160484 |
8156afb1df8b300172b241e218362d0df0d09d97 | 297 | py | Python | setup.py | dilayercelik/neural-networks-tfw1 | 8f8100bad59d2d57ada7b8a7efb16544f805c9bd | [
"MIT"
]
| null | null | null | setup.py | dilayercelik/neural-networks-tfw1 | 8f8100bad59d2d57ada7b8a7efb16544f805c9bd | [
"MIT"
]
| null | null | null | setup.py | dilayercelik/neural-networks-tfw1 | 8f8100bad59d2d57ada7b8a7efb16544f805c9bd | [
"MIT"
]
| null | null | null | from setuptools import setup
setup(name='neural_networks_tfw1',
version='0.1',
description='Implementing Neural Networks with Tensorflow',
packages=['neural_networks_tfw1'],
author='Dilay Fidan Ercelik',
author_email='[email protected]',
zip_safe=False)
| 29.7 | 65 | 0.707071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.474747 |
8157314b8c5e999d455ae8518882f282b75cc228 | 4,581 | py | Python | half_json/json_util.py | half-pie/half-json | d8064e90ac769547c22db11bcbe47fcb4f1eb600 | [
"MIT"
]
| 4 | 2020-08-04T15:14:25.000Z | 2021-08-18T18:29:03.000Z | half_json/json_util.py | half-pie/half-json | d8064e90ac769547c22db11bcbe47fcb4f1eb600 | [
"MIT"
]
| 1 | 2019-06-04T15:01:31.000Z | 2019-06-04T15:01:31.000Z | half_json/json_util.py | half-pie/half-json | d8064e90ac769547c22db11bcbe47fcb4f1eb600 | [
"MIT"
]
| 3 | 2019-06-01T14:16:32.000Z | 2021-06-25T10:10:47.000Z | # coding=utf8
import re
import json.decoder
from collections import namedtuple
from json.decoder import JSONDecoder
from json.scanner import py_make_scanner
from json.decoder import py_scanstring
class JSONDecodeError(object):
def __init__(self, parser, message):
self.message = message
self.parser = parser
def __eq__(self, err):
return err.parser == self.parser and self.message in err.message
class errors(object):
StringInvalidUXXXXEscape = JSONDecodeError("py_scanstring", "Invalid \\uXXXX escape")
    # 2 different cases
StringUnterminatedString = JSONDecodeError("py_scanstring", "Unterminated string starting at")
StringInvalidControlCharacter = JSONDecodeError("py_scanstring", "Invalid control character")
StringInvalidEscape = JSONDecodeError("py_scanstring", "Invalid \\escape")
ObjectExceptColon = JSONDecodeError("JSONObject", "Expecting ':' delimiter")
ObjectExceptObject = JSONDecodeError("JSONObject", "Expecting object")
    # 2 different cases
ObjectExceptKey = JSONDecodeError("JSONObject", "Expecting property name enclosed in double quotes")
ObjectExceptComma = JSONDecodeError("JSONObject", "Expecting ',' delimiter")
ArrayExceptObject = JSONDecodeError("JSONArray", "Expecting object")
ArrayExceptComma = JSONDecodeError("JSONArray", "Expecting ',' delimiter")
@classmethod
def get_decode_error(cls, parser, message):
err = JSONDecodeError(parser, message)
for _, value in cls.__dict__.items():
if isinstance(value, JSONDecodeError):
if err == value:
return value
return None
"""
01 先不看,不研究
02 badcase: " --> "" success
03 控制符 pass
04 unicode \\u 的 pass
05 同上
06 object 后面没有跟随 " , badcase: {abc":1} --> {"abc":1}
07 object key 后面没有 : , badcase: {"abc"1} --> {"abc":1}
08 object 开始检测 Value 收到 StopIteration
08.1 要么后面没有了
08.2 要么后面不是 "/{/[/n[ull]/t[rue]/f[alse]/number/NaN/Infinity/-Infinity 开头的东西
-- 08.1 后面补上 null}
-- 08.2 无脑补一个 "
09 object 解析完一个 pair 后,下一个不是}, 期待一个 ','
badcase {"k":1"s":2}
10 在 09 的基础上解析完{"k":1, 发现下一个不是 ", 这个后面再优化(暂时和 06 一致)
badcase {"k":1,x":2}
11 array 开始检测 Value 收到 StopIteration
11.1 要么后面没有了,补上]
11.2 同 08.2,无脑补一个{ 看看
12 array 解析完前一个 object, 需要一个 ,
这里 nextchar 既不是 ] 也不是, 代表这个 nextchar 的 end 也已经+1 了,所以减 2
"""
def errmsg_inv(e):
assert isinstance(e, ValueError)
message = e.message
idx = message.rindex(':')
errmsg, left = message[:idx], message[idx + 1:]
numbers = re.compile(r'\d+').findall(left)
parser = e.__dict__.get("parser", "")
result = {
"parsers": e.__dict__.get("parsers", []),
"error": errors.get_decode_error(parser, errmsg),
"lineno": int(numbers[0]),
"colno": int(numbers[1]),
}
if len(numbers) == 3:
result["pos"] = int(numbers[2])
if len(numbers) > 3:
result["endlineno"] = int(numbers[2])
result["endcolno"] = int(numbers[3])
result["pos"] = int(numbers[4])
result["end"] = int(numbers[5])
return result
def record_parser_name(parser):
def new_parser(*args, **kwargs):
try:
return parser(*args, **kwargs)
except Exception as e:
if "parser" not in e.__dict__:
e.__dict__["parser"] = parser.__name__
if "parsers" not in e.__dict__:
e.__dict__["parsers"] = []
e.__dict__["parsers"].append(parser.__name__)
raise e
return new_parser
def make_decoder():
json.decoder.scanstring = record_parser_name(py_scanstring)
decoder = JSONDecoder()
decoder.parse_object = record_parser_name(decoder.parse_object)
decoder.parse_array = record_parser_name(decoder.parse_array)
decoder.parse_string = record_parser_name(py_scanstring)
decoder.parse_object = record_parser_name(decoder.parse_object)
decoder.scan_once = py_make_scanner(decoder)
return decoder
decoder = make_decoder()
DecodeResult = namedtuple('DecodeResult', ['success', 'exception', 'err_info'])
def decode_line(line):
try:
obj, end = decoder.scan_once(line, 0)
ok = end == len(line)
return DecodeResult(success=ok, exception=None, err_info=(obj, end))
except StopIteration as e:
return DecodeResult(success=False, exception=e, err_info=None)
except ValueError as e:
err_info = errmsg_inv(e)
return DecodeResult(success=False, exception=e, err_info=err_info)
| 32.260563 | 104 | 0.652696 | 2,519 | 0.515871 | 0 | 0 | 290 | 0.05939 | 0 | 0 | 1,681 | 0.344256 |
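A short usage sketch for decode_line above, covering the three outcomes it reports; note the module targets Python 2, where errmsg_inv can read e.message:

ok = decode_line('{"k": 1}')
print(ok.success)        # True: the whole line parsed as a single JSON value

trailing = decode_line('{"k": 1} extra')
print(trailing.success)  # False: scan_once stopped before the end of the line

broken = decode_line('{"k":1"s":2}')
# The patched decoder records which parser failed, so the error maps to a known case.
print(broken.err_info["error"] is errors.ObjectExceptComma)  # True: missing ',' between pairs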
8157a3f9c8c5a1a22e0af65a58d5048d55b4c514 | 1,963 | py | Python | capa-system/capaSystem.py | slumbermachine/capatimelapse | 446e2a276b8ab0cf8d2f2292858cf2b540eb4748 | [
"MIT"
]
| 2 | 2017-06-13T20:49:50.000Z | 2019-04-09T10:14:24.000Z | capa-system/capaSystem.py | slumbermachine/capatimelapse | 446e2a276b8ab0cf8d2f2292858cf2b540eb4748 | [
"MIT"
]
| null | null | null | capa-system/capaSystem.py | slumbermachine/capatimelapse | 446e2a276b8ab0cf8d2f2292858cf2b540eb4748 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
#####################################################################
# Name : capaSystem.py
# Description : Read system data and update db for web display
# Environment : Tested under Raspberry Pi Raspbian Jessie Summer 17
# Author : Steve Osteen [email protected]
######################################################################
import MySQLdb
import sys
import time
from subprocess import Popen, PIPE
import logging
import logging.handlers
log = logging.getLogger('CapaTimeLapseLog')
log.setLevel(logging.DEBUG) # prod: logging.ERROR
handler = logging.handlers.SysLogHandler(address='/dev/log')
formatter = logging.Formatter('%(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def get_temp():
t1 = Popen(["cat","/sys/class/thermal/thermal_zone0/temp"], stdout=PIPE)
output = t1.communicate()[0]
clean = output.rstrip()
clean = (float(clean) / 1000)
return clean
def get_batt():
try:
t1 = Popen(['/usr/local/bin/lifepo4wered-cli', 'get', 'vbat'], stdout=PIPE)
output = t1.communicate()[0]
clean = output.rstrip()
return clean
except Exception:
return "0"
def insert_db(temp, battery):
try:
db = MySQLdb.connect("localhost", "monitor", "23rdqw", "system")
except Exception as e:
log.critical('Error accessing database: %s', e)
sys.exit('Error accessing database')
try:
cursor=db.cursor()
line = "INSERT INTO tempdat values(0,CURRENT_DATE(),CURRENT_TIME(), %s, %s)" %(temp, battery)
cursor.execute(line)
db.commit()
except Exception as e:
db.rollback()
log.critical('Error in database submission: %s', e)
db.close()
def main():
while True:
battery = get_batt()
temp = get_temp()
insert_db(temp, battery)
time.sleep(60)
if __name__ == '__main__':
main()
| 30.671875 | 101 | 0.596536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 765 | 0.38971 |
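The INSERT above supplies an id placeholder, the current date and time, a temperature and a battery reading, so the tempdat table is assumed to have five matching columns. A hedged sketch of a compatible table created over the same connection (the column names and types are assumptions, not taken from the original project):

import MySQLdb

ddl = (
    "CREATE TABLE IF NOT EXISTS tempdat ("
    " id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,"
    " rdate DATE,"
    " rtime TIME,"
    " temp FLOAT,"
    " battery INT)"
)

db = MySQLdb.connect("localhost", "monitor", "23rdqw", "system")
cursor = db.cursor()
cursor.execute(ddl)
db.commit()
db.close()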
815971e46cc7b24062e27d29f7d4f09a3aec13fb | 3,770 | py | Python | wagtail/wagtailadmin/tasks.py | willcodefortea/wagtail | 2723b85ed8f356bde89d9541105b8cea4812d6a1 | [
"BSD-3-Clause"
]
| null | null | null | wagtail/wagtailadmin/tasks.py | willcodefortea/wagtail | 2723b85ed8f356bde89d9541105b8cea4812d6a1 | [
"BSD-3-Clause"
]
| null | null | null | wagtail/wagtailadmin/tasks.py | willcodefortea/wagtail | 2723b85ed8f356bde89d9541105b8cea4812d6a1 | [
"BSD-3-Clause"
]
| null | null | null | from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from wagtail.wagtailcore.models import PageRevision, GroupPagePermission
from wagtail.wagtailusers.models import UserProfile
# The following will check to see if we can import task from celery -
# if not then we definitely haven't installed it
try:
from celery.decorators import task
NO_CELERY = False
except:
NO_CELERY = True
# However, we could have installed celery for other projects. So we will also
# check if we have defined the BROKER_URL setting. If not then definitely we
# haven't configured it.
if NO_CELERY or not hasattr(settings, 'BROKER_URL'):
# So if we enter here we will define a different "task" decorator that
# just returns the original function and sets its delay attribute to
# point to the original function: This way, the send_notification
    # function will actually be called instead of the
# send_notification.delay()
def task(f):
f.delay=f
return f
def users_with_page_permission(page, permission_type, include_superusers=True):
# Get user model
User = get_user_model()
# Find GroupPagePermission records of the given type that apply to this page or an ancestor
ancestors_and_self = list(page.get_ancestors()) + [page]
perm = GroupPagePermission.objects.filter(permission_type=permission_type, page__in=ancestors_and_self)
q = Q(groups__page_permissions=perm)
# Include superusers
if include_superusers:
q |= Q(is_superuser=True)
return User.objects.filter(is_active=True).filter(q).distinct()
@task
def send_notification(page_revision_id, notification, excluded_user_id):
# Get revision
revision = PageRevision.objects.get(id=page_revision_id)
# Get list of recipients
if notification == 'submitted':
# Get list of publishers
recipients = users_with_page_permission(revision.page, 'publish')
elif notification in ['rejected', 'approved']:
# Get submitter
recipients = [revision.user]
else:
return
# Get list of email addresses
email_addresses = [
recipient.email for recipient in recipients
if recipient.email and recipient.id != excluded_user_id and getattr(UserProfile.get_for_user(recipient), notification + '_notifications')
]
# Return if there are no email addresses
if not email_addresses:
return
# Get email subject and content
template = 'wagtailadmin/notifications/' + notification + '.html'
rendered_template = render_to_string(template, dict(revision=revision, settings=settings)).split('\n')
email_subject = rendered_template[0]
email_content = '\n'.join(rendered_template[1:])
# Get from email
if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):
from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL
elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):
from_email = settings.DEFAULT_FROM_EMAIL
else:
from_email = 'webmaster@localhost'
# Send email
send_mail(email_subject, email_content, from_email, email_addresses)
@task
def send_email_task(email_subject, email_content, email_addresses, from_email=None):
if not from_email:
if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):
from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL
elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):
from_email = settings.DEFAULT_FROM_EMAIL
else:
from_email = 'webmaster@localhost'
send_mail(email_subject, email_content, from_email, email_addresses)
| 36.960784 | 145 | 0.734218 | 0 | 0 | 0 | 0 | 2,026 | 0.537401 | 0 | 0 | 1,183 | 0.313793 |
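The no-Celery fallback above gives plain functions a .delay attribute so callers can always write func.delay(...); a minimal standalone sketch of that pattern outside Django/Celery:

def task(f):
    # Same trick as above: "asynchronous" calls just run synchronously in-process.
    f.delay = f
    return f


@task
def send_ping(address):
    return 'pinged %s' % address


print(send_ping.delay('webmaster@localhost'))  # identical to calling send_ping(...) directly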
815a258e49c9c6abc6816370b4272cf95e62bbe1 | 3,506 | py | Python | app/map_sup_enrich_compose.py | onap/sdc-dcae-d-tosca-lab | b0120c1671e8987387ccae4f21793ceb303f471c | [
"Apache-2.0"
]
| 1 | 2021-10-15T19:47:42.000Z | 2021-10-15T19:47:42.000Z | app/map_sup_enrich_compose.py | onap/archive-sdc-dcae-d-tosca-lab | b0120c1671e8987387ccae4f21793ceb303f471c | [
"Apache-2.0"
]
| null | null | null | app/map_sup_enrich_compose.py | onap/archive-sdc-dcae-d-tosca-lab | b0120c1671e8987387ccae4f21793ceb303f471c | [
"Apache-2.0"
]
| 1 | 2021-10-15T19:47:34.000Z | 2021-10-15T19:47:34.000Z | #Author: Shu Shi
#email: [email protected]
from toscalib.tosca_workbook import ToscaWorkBook
from toscalib.tosca_builder import ToscaBuilder
import getopt, sys, json, logging
def usage():
print('OPTIONS:')
print('\t-h|--help: print this help message')
print('\t-i|--input: The home folder where all spec files are')
print('\t-o|--output: the output file name')
print('\t-v|--value: the json value file')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:v:", ["help", "input=", "output=", "value="])
except getopt.GetoptError as err:
# print help information and exit:
logging.error( str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
spec_prefix = None
output_file = None
value_file = None
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
spec_prefix = a
elif o in ("-o", "--output"):
output_file = a
elif o in ("-v", "--value"):
value_file = a
else:
logging.error( 'Unrecognized option: ' + o)
usage()
sys.exit(2)
if spec_prefix is None or output_file is None:
logging.error( 'Incorrect arguments!')
usage()
sys.exit(2)
model_prefix = './data/tosca_model'
meta_model = './data/meta_model/meta_tosca_schema.yaml'
for ms in ['map', 'enrich', 'supplement']:
builder = ToscaBuilder()
builder.import_schema(meta_model)
builder.import_spec(spec_prefix+'/dcae-event-proc/dcae-event-proc-cdap-' + ms+ '\\' + ms+ '_spec.json')
builder.create_node_type()
builder.export_schema(model_prefix+'/' + ms + '/schema.yaml')
builder.import_schema(model_prefix+'/' + ms + '/schema.yaml')
builder.create_model(ms)
builder.export_model(model_prefix+'/' + ms + '/template.yaml')
builder.create_translate(ms)
builder.export_translation(model_prefix+'/' + ms + '/translate.yaml')
workbook = ToscaWorkBook()
workbook._import_dir(model_prefix)
workbook._import_dir('./data/shared_model/')
workbook._use('map','NO_PREFIX')
workbook._use('supplement','NO_PREFIX')
workbook._use('enrich','NO_PREFIX')
if value_file is not None:
try:
with open(value_file) as data_file:
data = json.load(data_file)
for ms in ['map', 'enrich', 'supplement']:
# if data.has_key(ms):
if ms in data:
prop_sec = data[ms]
for key in prop_sec.keys():
workbook._assign(ms, key, prop_sec[key])
        except Exception as err:
            logging.error("Unable to read " + value_file)
            logging.error(str(err))
workbook._add_shared_node([{'dcae.capabilities.cdapHost':'cdap_host'}, {'dcae.capabilities.dockerHost': 'docker_host'}, {'dcae.capabilities.composition.host': 'composition_virtual'}])
workbook._assign('supplement', 'stream_publish_0', 'map')
workbook._assign('enrich', 'stream_publish_0', 'supplement')
workbook.tran_db = workbook.db
workbook._export_yaml('event_proc.yaml', 'no_expand,main')
workbook._export_yaml(output_file, 'cloudify,main')
if __name__ == "__main__":
main() | 35.414141 | 187 | 0.582145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,097 | 0.312892 |
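The -v option above expects a JSON file keyed by microservice name (map, enrich, supplement), each mapping property names to values that are fed to workbook._assign. A hedged sketch of writing such a file (the property names and values are placeholders; the real keys depend on the imported TOSCA models):

import json

values = {
    "map": {"stream_subscribe_0": "ves_fault"},
    "enrich": {"docker_image": "example/enrich:latest"},
    "supplement": {"log_level": "INFO"},
}

with open("values.json", "w") as value_file:
    json.dump(values, value_file, indent=2)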
815a83e6c63111824a00c57370e405a878ff8494 | 2,872 | py | Python | gollama/backend/tests/test_api/test_shorthand.py | benjaminhubbell/gollama | 193e9eddf26d295b9a34474ae7fb93e2a91ef73a | [
"MIT"
]
| 1 | 2020-08-26T19:02:25.000Z | 2020-08-26T19:02:25.000Z | gollama/backend/tests/test_api/test_shorthand.py | benjaminhubbell/gollama | 193e9eddf26d295b9a34474ae7fb93e2a91ef73a | [
"MIT"
]
| 18 | 2020-06-05T06:42:22.000Z | 2021-06-04T23:51:19.000Z | gollama/backend/tests/test_api/test_shorthand.py | benjaminhubbell/gollama | 193e9eddf26d295b9a34474ae7fb93e2a91ef73a | [
"MIT"
]
| 3 | 2020-08-17T02:58:11.000Z | 2020-08-18T00:03:00.000Z | from django.test import TestCase
from rest_framework.test import APIClient
class TestShortHand(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_get(self):
response_json = self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'}).json()
response = self.client.get(f'/api/v1/shorthand/{response_json["id"]}/')
self.assertEqual(200, response.status_code)
self.assertEqual({
'id': 1,
'label': 'foo',
'url': 'http://bar.com'
}, response.json())
def test_list(self):
self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'})
self.client.post('/api/v1/shorthand/', {'label': 'bar', 'url': 'http://foo.com'})
response = self.client.get('/api/v1/shorthand/')
self.assertEqual(200, response.status_code)
response_json = response.json()
self.assertEqual(2, len(response_json))
def test_create(self):
response = self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'})
self.assertEqual(201, response.status_code)
response_json = response.json()
self.assertEqual({
'id': 1,
'label': 'foo',
'url': 'http://bar.com'
}, response_json)
def test_create_fail_duplicate(self):
response = self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'})
self.assertEqual(201, response.status_code)
response = self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'})
self.assertEqual(400, response.status_code)
def test_update(self):
response = self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'})
self.assertEqual(201, response.status_code)
response_json = response.json()
self.assertEqual('http://bar.com', response_json['url'])
response = self.client.patch(f'/api/v1/shorthand/{response_json["id"]}/', {'url': 'https://bar.com'})
self.assertEqual(200, response.status_code)
response_json = response.json()
self.assertEqual('https://bar.com', response_json['url'])
def test_delete(self):
response = self.client.post('/api/v1/shorthand/', {'label': 'foo', 'url': 'http://bar.com'})
self.assertEqual(201, response.status_code)
response_json = response.json()
self.assertEqual({
'id': 1,
'label': 'foo',
'url': 'http://bar.com'
}, response_json)
response = self.client.delete(f'/api/v1/shorthand/{response_json["id"]}/')
self.assertEqual(204, response.status_code)
response = self.client.get(f'/api/v1/shorthand/{response_json["id"]}/')
self.assertEqual(404, response.status_code)
| 43.515152 | 112 | 0.597493 | 2,794 | 0.972841 | 0 | 0 | 0 | 0 | 0 | 0 | 792 | 0.275766 |
815b05fc82778af22f4d9f4104ce255be5442149 | 1,937 | py | Python | lusidtools/lpt/qry_scopes.py | fossabot/lusid-python-tools | 93b2fa8085a0a6550d12d036bd89248aba6e5718 | [
"MIT"
]
| 1 | 2020-04-27T12:27:23.000Z | 2020-04-27T12:27:23.000Z | lusidtools/lpt/qry_scopes.py | entityoneuk/lusid-python-tools | ee13d92673d01cfc9f7c427ed053e7a1e8d64973 | [
"MIT"
]
| null | null | null | lusidtools/lpt/qry_scopes.py | entityoneuk/lusid-python-tools | ee13d92673d01cfc9f7c427ed053e7a1e8d64973 | [
"MIT"
]
| null | null | null | import pandas as pd
import dateutil
from lusidtools.lpt import lpt
from lusidtools.lpt import lse
from lusidtools.lpt import stdargs
from .either import Either
import re
import urllib.parse
rexp = re.compile(r".*page=([^=']{10,}).*")
TOOLNAME = "scopes"
TOOLTIP = "List scopes"
def parse(extend=None, args=None):
return (
stdargs.Parser("Get Scopes", ["filename", "limit"])
.add("--portfolios", action="store_true")
.extend(extend)
.parse(args)
)
def process_args(api, args):
results = []
def fetch_page(page_token):
return api.call.list_portfolios(page=page_token)
def got_page(result):
if args.portfolios:
df = lpt.to_df(
result,
["id.scope", "id.code", "is_derived", "type", "parent_portfolio_id"],
)
df.columns = ["Scope", "Portfolio", "Derived", "Type", "Parent"]
else:
df = (
pd.DataFrame({"Scopes": [v.id.scope for v in result.content.values]})
.groupby("Scopes")
.size()
.reset_index()
)
results.append(df)
links = [l for l in result.content.links if l.relation == "NextPage"]
if len(links) > 0:
match = rexp.match(links[0].href)
if match:
return urllib.parse.unquote(match.group(1))
return None
page = Either(None)
while True:
page = fetch_page(page.right).bind(got_page)
if page.is_left():
return page
if page.right == None:
break
return lpt.trim_df(
pd.concat(results, ignore_index=True, sort=False),
args.limit,
sort=["Scope", "Portfolio"] if args.portfolios else "Scopes",
)
# Standalone tool
def main(parse=parse, display_df=lpt.display_df):
return lpt.standard_flow(parse, lse.connect, process_args, display_df)
| 26.534247 | 85 | 0.575116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.138358 |
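The got_page helper above pulls the continuation token for the next page out of the NextPage link with rexp and URL-decodes it. A small self-contained illustration with a made-up link (the URL format is an assumption):

import re
import urllib.parse

rexp = re.compile(r".*page=([^=']{10,}).*")

href = "https://example.lusid.com/api/portfolios?page=abcdefghijKLMNOP%3D"
match = rexp.match(href)
if match:
    print(urllib.parse.unquote(match.group(1)))  # abcdefghijKLMNOP=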
815d2bb0d4f56879066adfa37185b3b120de6583 | 8,457 | py | Python | qqbot/qqbotcls.py | skarl-api/qqbot | 825ce91c080f4a315860e26df70d687a4ded7159 | [
"MIT"
]
| null | null | null | qqbot/qqbotcls.py | skarl-api/qqbot | 825ce91c080f4a315860e26df70d687a4ded7159 | [
"MIT"
]
| null | null | null | qqbot/qqbotcls.py | skarl-api/qqbot | 825ce91c080f4a315860e26df70d687a4ded7159 | [
"MIT"
]
| 1 | 2020-03-30T08:06:24.000Z | 2020-03-30T08:06:24.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
QQBot -- A conversation robot based on Tencent's SmartQQ
Website -- https://github.com/pandolia/qqbot/
Author -- [email protected]
"""
import sys, os
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if p not in sys.path:
sys.path.insert(0, p)
import sys, subprocess, time
from apscheduler.schedulers.background import BackgroundScheduler
from collections import defaultdict
from qqbot.qconf import QConf
from qqbot.utf8logger import INFO, CRITICAL, ERROR, WARN
from qqbot.qsession import QLogin, RequestError
from qqbot.exitcode import RESTART, POLL_ERROR, FRESH_RESTART
from qqbot.common import StartDaemonThread, Import
from qqbot.qterm import QTermServer
from qqbot.mainloop import MainLoop, Put
from qqbot.groupmanager import GroupManager
def runBot(botCls, qq, user):
if sys.argv[-1] == '--subprocessCall':
isSubprocessCall = True
sys.argv.pop()
else:
isSubprocessCall = False
if isSubprocessCall:
bot = botCls()
bot.Login(qq, user)
bot.Run()
else:
conf = QConf(qq, user)
if sys.argv[0].endswith('py') or sys.argv[0].endswith('pyc'):
args = [sys.executable] + sys.argv
else:
args = sys.argv
args = args + ['--mailAuthCode', conf.mailAuthCode]
args = args + ['--qq', conf.qq]
args = args + ['--subprocessCall']
while True:
p = subprocess.Popen(args)
pid = p.pid
code = p.wait()
if code == 0:
                INFO('QQBot stopped normally')
sys.exit(code)
elif code == RESTART:
args[-2] = conf.LoadQQ(pid)
                INFO('Restarting QQBot in 5 seconds (auto login)')
time.sleep(5)
elif code == FRESH_RESTART:
args[-2] = ''
                INFO('Restarting QQBot in 5 seconds (manual login)')
time.sleep(5)
else:
                CRITICAL('QQBot stopped abnormally (code=%s)', code)
if conf.restartOnOffline:
args[-2] = conf.LoadQQ(pid)
                    INFO('Restarting QQBot in 30 seconds (auto login)')
time.sleep(30)
else:
sys.exit(code)
def RunBot(botCls=None, qq=None, user=None):
try:
runBot((botCls or QQBot), qq, user)
except KeyboardInterrupt:
sys.exit(1)
class QQBot(GroupManager):
def Login(self, qq=None, user=None):
session, contactdb, self.conf = QLogin(qq, user)
# main thread
self.SendTo = session.SendTo
self.groupKick = session.GroupKick
self.groupSetAdmin = session.GroupSetAdmin
self.groupShut = session.GroupShut
self.groupSetCard = session.GroupSetCard
# main thread
self.List = contactdb.List
self.Update = contactdb.Update
self.StrOfList = contactdb.StrOfList
self.ObjOfList = contactdb.ObjOfList
self.findSender = contactdb.FindSender
self.firstFetch = contactdb.FirstFetch
self.Delete = contactdb.db.Delete
self.Modify = contactdb.db.Modify
# child thread 1
self.poll = session.Copy().Poll
# child thread 2
self.termForver = QTermServer(self.conf.termServerPort).Run
def Run(self):
QQBot.initScheduler(self)
import qqbot.qslots as _x; _x
for plugin in self.conf.plugins:
self.Plug(plugin)
if self.conf.startAfterFetch:
self.firstFetch()
self.onStartupComplete()
StartDaemonThread(self.pollForever)
StartDaemonThread(self.termForver, self.onTermCommand)
StartDaemonThread(self.intervalForever)
MainLoop()
def Stop(self):
sys.exit(0)
def Restart(self):
self.conf.StoreQQ()
sys.exit(RESTART)
def FreshRestart(self):
sys.exit(FRESH_RESTART)
# child thread 1
def pollForever(self):
while True:
try:
result = self.poll()
except RequestError:
self.conf.StoreQQ()
Put(sys.exit, POLL_ERROR)
break
except:
                ERROR('qsession.Poll raised an exception', exc_info=True)
else:
Put(self.onPollComplete, *result)
def onPollComplete(self, ctype, fromUin, membUin, content):
if ctype == 'timeout':
return
contact, member, nameInGroup = \
self.findSender(ctype, fromUin, membUin, self.conf.qq)
if self.detectAtMe(nameInGroup, content):
            INFO('Someone @-mentioned me: %s[%s]' % (contact, member))
content = '[@ME] ' + content.replace('@'+nameInGroup, '')
else:
content = content.replace('@ME', '@Me')
if ctype == 'buddy':
            INFO('Message from %s: "%s"' % (contact, content))
else:
            INFO('Message from %s[%s]: "%s"' % (contact, member, content))
self.onQQMessage(contact, member, content)
def detectAtMe(self, nameInGroup, content):
return nameInGroup and ('@'+nameInGroup) in content
# child thread 5
def intervalForever(self):
while True:
time.sleep(300)
Put(self.onInterval)
slotsTable = {
'onQQMessage': [],
'onInterval': [],
'onStartupComplete': []
}
plugins = set()
@classmethod
def AddSlot(cls, func):
cls.slotsTable[func.__name__].append(func)
return func
@classmethod
def unplug(cls, moduleName, removeJob=True):
for slots in cls.slotsTable.values():
i = 0
while i < len(slots):
if slots[i].__module__ == moduleName:
slots[i] = slots[-1]
slots.pop()
else:
i += 1
if removeJob:
for job in cls.schedTable.pop(moduleName, []):
job.remove()
cls.plugins.discard(moduleName)
@classmethod
def Unplug(cls, moduleName):
if moduleName not in cls.plugins:
            result = 'Warning: trying to unplug plugin %s, which is not installed' % moduleName
WARN(result)
else:
cls.unplug(moduleName)
            result = 'Success: unplugged plugin %s' % moduleName
INFO(result)
return result
@classmethod
def Plug(cls, moduleName):
cls.unplug(moduleName)
try:
module = Import(moduleName)
except (Exception, SystemExit) as e:
            result = 'Error: failed to load plugin %s, %s: %s' % (moduleName, type(e), e)
ERROR(result)
else:
cls.unplug(moduleName, removeJob=False)
names = []
for slotName in cls.slotsTable.keys():
if hasattr(module, slotName):
cls.slotsTable[slotName].append(getattr(module, slotName))
names.append(slotName)
if (not names) and (moduleName not in cls.schedTable):
                result = 'Warning: plugin %s defines no callback functions or scheduled jobs' % moduleName
WARN(result)
else:
cls.plugins.add(moduleName)
jobs = cls.schedTable.get(moduleName,[])
jobNames = [f.func.__name__ for f in jobs]
                result = 'Success: loaded plugin %s (callbacks %s, scheduled jobs %s)' % \
                         (moduleName, names, jobNames)
INFO(result)
return result
@classmethod
def Plugins(cls):
return list(cls.plugins)
scheduler = BackgroundScheduler(daemon=True)
schedTable = defaultdict(list)
@classmethod
def initScheduler(cls, bot):
cls._bot = bot
cls.scheduler.start()
@classmethod
def AddSched(cls, **triggerArgs):
def wrapper(func):
job = lambda: Put(func, cls._bot)
job.__name__ = func.__name__
j = cls.scheduler.add_job(job, 'cron', **triggerArgs)
cls.schedTable[func.__module__].append(j)
return func
return wrapper
def wrap(slots):
return lambda *a,**kw: [f(*a, **kw) for f in slots[:]]
for name, slots in QQBot.slotsTable.items():
setattr(QQBot, name, wrap(slots))
QQBotSlot = QQBot.AddSlot
QQBotSched = QQBot.AddSched
if __name__ == '__main__':
bot = QQBot()
bot.Login(user='hcj')
gl = bot.List('group')
ml = bot.List(gl[0])
m = ml[0]
| 29.262976 | 78 | 0.551614 | 5,887 | 0.675037 | 0 | 0 | 2,633 | 0.301915 | 0 | 0 | 994 | 0.113978 |
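Plugins loaded through Plug above only need to define module-level functions named after the slots in slotsTable (onQQMessage, onInterval, onStartupComplete) or register jobs via QQBotSched. A minimal sketch of such a plugin module (the module name and echo behaviour are illustrative):

# e.g. saved as myplugin.py and listed in conf.plugins
def onStartupComplete(bot):
    # Called once the first contact fetch has finished.
    pass


def onQQMessage(bot, contact, member, content):
    # onPollComplete above prefixes '[@ME] ' when the bot is @-mentioned.
    if content.startswith('[@ME] '):
        bot.SendTo(contact, content[len('[@ME] '):].strip())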
815d74b7b1790e6b997d1b97f06db916a5d075c4 | 65 | py | Python | tests/gear_scripts/buitin_runtime_func_GearsBuilder.py | jsam/redgrease | 245755b34bce287c63abb6624478cdf8189816b6 | [
"MIT"
]
| 17 | 2021-02-26T23:03:39.000Z | 2022-01-26T11:21:49.000Z | tests/gear_scripts/buitin_runtime_func_GearsBuilder.py | jsam/redgrease | 245755b34bce287c63abb6624478cdf8189816b6 | [
"MIT"
]
| 87 | 2021-02-16T08:54:59.000Z | 2021-08-18T07:21:39.000Z | tests/gear_scripts/buitin_runtime_func_GearsBuilder.py | jsam/redgrease | 245755b34bce287c63abb6624478cdf8189816b6 | [
"MIT"
]
| 3 | 2021-04-21T07:57:43.000Z | 2021-10-04T09:13:14.000Z | from redgrease import GearsBuilder
gb = GearsBuilder()
gb.run()
| 13 | 34 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |