Dataset columns (name: type, min..max; ⌀ = null values present):
hexsha: stringlengths 40..40
size: int64 5..2.06M
ext: stringclasses 11 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3..251
max_stars_repo_name: stringlengths 4..130
max_stars_repo_head_hexsha: stringlengths 40..78
max_stars_repo_licenses: listlengths 1..10
max_stars_count: int64 1..191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24..24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24..24 ⌀
max_issues_repo_path: stringlengths 3..251
max_issues_repo_name: stringlengths 4..130
max_issues_repo_head_hexsha: stringlengths 40..78
max_issues_repo_licenses: listlengths 1..10
max_issues_count: int64 1..116k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24..24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24..24 ⌀
max_forks_repo_path: stringlengths 3..251
max_forks_repo_name: stringlengths 4..130
max_forks_repo_head_hexsha: stringlengths 40..78
max_forks_repo_licenses: listlengths 1..10
max_forks_count: int64 1..105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24..24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24..24 ⌀
content: stringlengths 1..1.05M
avg_line_length: float64 1..1.02M
max_line_length: int64 3..1.04M
alphanum_fraction: float64 0..1

Each record below is one table row: the 25 metadata fields on a single pipe-separated line, followed by the file content, then the final three statistics (avg_line_length | max_line_length | alphanum_fraction).
be0fe0b9b27dc32e6433b7115d2cc69a3736598b | 362 | py | Python | custom_components/snowtire/__init__.py | borys-kupar/smart-home | f9c5ac949106e09278b97f49d5e08f0d495b24ef | ["MIT"] | 128 | 2021-03-04T21:54:04.000Z | 2022-03-17T22:53:20.000Z | custom_components/snowtire/__init__.py | borys-kupar/smart-home | f9c5ac949106e09278b97f49d5e08f0d495b24ef | ["MIT"] | 4 | 2021-03-07T21:18:12.000Z | 2021-09-24T13:09:39.000Z | custom_components/snowtire/__init__.py | borys-kupar/smart-home | f9c5ac949106e09278b97f49d5e08f0d495b24ef | ["MIT"] | 15 | 2021-03-05T07:29:31.000Z | 2022-03-31T10:07:06.000Z |
#
# Copyright (c) 2020, Andrey "Limych" Khrolenok <[email protected]>
# Creative Commons BY-NC-SA 4.0 International Public License
# (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)
#
"""
The Snowtire binary sensor.
For more details about this platform, please refer to the documentation at
https://github.com/Limych/ha-snowtire/
"""
| 30.166667 | 74 | 0.743094 |
be0fff91b15ea92d0c10da9f061370eeda2b8af8 | 23,079 | py | Python | tests/test_bayes_classifier.py | manishgit138/pomegranate | 3457dcefdd623483b8efec7e9d87fd1bf4c115b0 | ["MIT"] | 3,019 | 2015-01-04T23:19:03.000Z | 2022-03-31T12:55:46.000Z | tests/test_bayes_classifier.py | manishgit138/pomegranate | 3457dcefdd623483b8efec7e9d87fd1bf4c115b0 | ["MIT"] | 818 | 2015-01-05T10:15:57.000Z | 2022-03-07T19:30:28.000Z | tests/test_bayes_classifier.py | manishgit138/pomegranate | 3457dcefdd623483b8efec7e9d87fd1bf4c115b0 | ["MIT"] | 639 | 2015-01-05T04:16:42.000Z | 2022-03-29T11:08:00.000Z |
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy
import numpy as np
nan = numpy.nan
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
| 32.597458 | 126 | 0.716669 |
be10e301876952317779fb802d1ea27b44f1342a | 2,188 | py | Python | ks_engine/variable_scoring.py | FilippoRanza/ks.py | 47d909fb70fec50f8d3174855bf5d0c05527bf03 | ["MIT"] | 2 | 2021-01-29T11:45:39.000Z | 2022-03-10T03:17:12.000Z | ks_engine/variable_scoring.py | Optimization-Algorithms/ks.py | 44890d33a744c5c4865b96f97efc1e5241b719b1 | ["MIT"] | 1 | 2020-05-12T16:18:34.000Z | 2020-05-12T16:18:34.000Z | ks_engine/variable_scoring.py | Optimization-Algorithms/ks.py | 44890d33a744c5c4865b96f97efc1e5241b719b1 | ["MIT"] | 1 | 2021-01-29T11:45:45.000Z | 2021-01-29T11:45:45.000Z |
#! /usr/bin/python
from .solution import Solution
try:
import gurobipy
except ImportError:
print("Gurobi not found: error ignored to allow tests")
def callback_factory(scoring: "AbstactVariableScoring"):
    # AbstactVariableScoring and VariableRanking are defined elsewhere in this module.
    if isinstance(scoring, VariableRanking):
        output = __build_callback__(scoring)
    else:
        output = None
    return output
def __build_callback__(scoring):
    # The original callback body is trimmed in this excerpt; gurobipy callbacks take (model, where).
    def callback(model, where):
        pass
    return callback
| 27.012346 | 86 | 0.65128 |
be139101ad7d93480666b4065956e230585c96d9 | 1,180 | py | Python | src/fetchcode/vcs/pip/_internal/utils/entrypoints.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | ["Apache-2.0"] | 7 | 2019-10-04T07:27:41.000Z | 2021-06-07T04:39:18.000Z | src/fetchcode/vcs/pip/_internal/utils/entrypoints.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | ["Apache-2.0"] | 64 | 2019-10-07T12:40:56.000Z | 2022-02-17T18:44:37.000Z | src/fetchcode/vcs/pip/_internal/utils/entrypoints.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | ["Apache-2.0"] | 16 | 2019-10-04T08:48:12.000Z | 2021-06-11T01:22:56.000Z |
import sys
from fetchcode.vcs.pip._internal.cli.main import main
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
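# Illustrative only (hypothetical project and names, not pip's actual packaging
# metadata): the idea described in the docstring is that every legacy
# console-script name points at the same wrapper, so old entrypoints funnel
# through the warning above.
example_entry_points = {
    "console_scripts": [
        "mytool = mytool.cli.main:main",           # current entrypoint
        "mytool-legacy = mytool.compat:_wrapper",  # old name kept as a wrapper
    ],
}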
| 36.875 | 79 | 0.710169 |
be145918e072dc9949c9e4a6667701e412064948 | 7,896 | py | Python | Support/Make_Documentation.py | bvbohnen/x4-projects | 2c9db75a720ddb52ddb9e4160c330d7bb1986aa3 | ["MIT"] | 24 | 2020-04-11T18:43:01.000Z | 2022-02-23T11:02:02.000Z | Support/Make_Documentation.py | abouquet/x4-projects | 27ba6d2faaab95cfb9114bccb41fadbfe56443b7 | ["MIT"] | 10 | 2020-04-11T07:50:33.000Z | 2022-03-31T05:01:35.000Z | Support/Make_Documentation.py | abouquet/x4-projects | 27ba6d2faaab95cfb9114bccb41fadbfe56443b7 | ["MIT"] | 8 | 2020-04-24T05:21:55.000Z | 2022-03-26T03:02:13.000Z |
'''
Support for generating documentation readmes for the extensions.
Extracts from decorated lua block comments and xml comments.
'''
from pathlib import Path
from lxml import etree
import sys
from itertools import chain
project_dir = Path(__file__).resolve().parents[1]
# Set up an import from the customizer for some text processing.
x4_customizer_dir = str(project_dir.parent / 'X4_Customizer')
if x4_customizer_dir not in sys.path:
sys.path.append(x4_customizer_dir)
from Framework.Make_Documentation import Merge_Lines
#from Framework.Make_Documentation import Get_BB_Text
# Grab the project specifications.
from Release_Specs import release_specs
def Sections_To_Lines(doc_text_sections):
'''
    Converts a list of (section label, text) pairs to a list of text lines,
with labelling and formatting applied.
Expects the input to start with a 'title', then 'overview', then
a series of names of cues or functions.
'''
# Transfer to annotated/indented lines.
functions_started = False
title = ''
ret_text_lines = []
for key, text in doc_text_sections:
# Extract the title and continue; this isn't printed directly.
if key == 'title':
title = text.strip()
continue
# Header gets an 'overview' label.
if key == 'overview':
ret_text_lines += ['', '### {} Overview'.format(title), '']
indent = ''
# Lua functions are in one lump, like overview.
elif key == 'functions':
ret_text_lines += ['', '### {} Functions'.format(title), '']
indent = ''
# Sections may be multiple.
elif key == 'section':
ret_text_lines += ['','']
indent = ''
# Otherwise these are md cues.
else:
indent = ' '
# Stick a label line when starting the function section.
if not functions_started:
functions_started = True
ret_text_lines += ['', '### {} Cues'.format(title), '']
# Bullet the function name.
ret_text_lines.append('* **{}**'.format(key))
# Process the text a bit.
text = Merge_Lines(text)
# Add indents to functions, and break into convenient lines.
text_lines = [indent + line for line in text.splitlines()]
# Record for output.
ret_text_lines += text_lines
return ret_text_lines
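# Illustrative only: the kind of (label, text) pairs Sections_To_Lines expects
# (extension and cue names here are made up, not taken from any real project).
example_sections = [
    ('title', 'Example Extension'),
    ('overview', 'Adds a small helper API.'),
    ('Example_Cue', 'Raised when the helper finishes.'),
]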
def Get_XML_Cue_Text(xml_path):
'''
Returns a list of lines holding the documentation extracted
from a decorated MD xml file.
'''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Read the xml and pick out the cues.
tree = etree.parse(str(xml_path))
root = tree.xpath('/*')[0]
cues = tree.xpath('/*/cues')[0]
# Stride through comments/cues in the list.
# Looking for decorated comments.
for node in chain(root.iterchildren(), cues.iterchildren()):
# Skip non-comments.
# Kinda awkward how lxml checks this (isinstance doesn't work).
if node.tag is not etree.Comment:
continue
# Handle title declarations.
if '@doc-title' in node.text:
label = 'title'
text = node.text.replace('@doc-title','')
elif '@doc-overview' in node.text:
label = 'overview'
text = node.text.replace('@doc-overview','')
elif '@doc-section' in node.text:
label = 'section'
text = node.text.replace('@doc-section','')
elif '@doc-cue' in node.text:
label = node.getnext().get('name')
text = node.text.replace('@doc-cue','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
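# Illustrative only: a hypothetical decorated MD file of the shape this parser
# expects (a root-level @doc-title comment, and an @doc-cue comment placed
# directly before the cue it documents).
example_md_xml = '''
<mdscript>
  <!--@doc-title Example Extension-->
  <cues>
    <!--@doc-cue Signalled when the helper finishes.-->
    <cue name="Example_Cue"/>
  </cues>
</mdscript>
'''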
def Get_Lua_Text(lua_path):
'''
Extract documentation text from a decorated lua file.
'''
text = lua_path.read_text()
ret_text_lines = []
# Extract non-indented comments.
# TODO: maybe regex this.
comment_blocks = []
lua_lines = text.splitlines()
i = 0
while i < len(lua_lines):
this_line = lua_lines[i]
if this_line.startswith('--[['):
# Scan until the closing ]].
these_lines = []
# Record the first line.
these_lines.append(this_line.replace('--[[',''))
i += 1
# Only search to the end of the doc.
while i < len(lua_lines):
next_line = lua_lines[i]
if next_line.startswith(']]'):
# Found the last line; skip it.
break
these_lines.append(next_line)
i += 1
comment_blocks.append('\n'.join(these_lines))
# Check single-line comments after block comments, to avoid
# -- confusion.
elif this_line.startswith('--'):
comment_blocks.append(this_line.replace('--',''))
# Always one increment per loop.
i += 1
# Title to put on label lines.
# Starts blank, filled by decorator.
title = ''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Go through the comments looking for decorators.
for comment in comment_blocks:
# Handle title declarations.
if '@doc-title' in comment:
label = 'title'
text = comment.replace('@doc-title','')
# Text blocks are either overview or cue.
elif '@doc-overview' in comment:
label = 'overview'
text = comment.replace('@doc-overview','')
# For now, all functions are lumped together in one comment.
elif '@doc-functions' in comment:
label = 'functions'
text = comment.replace('@doc-functions','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
#-Removed; generally avoiding putting main docs on the forum.
#def Make_BB_Code(doc_dir, header_lines = []):
# '''
# Turn the ext_dir's readme into a bbcode txt file.
# Output is placed in the release folder.
# '''
# release_dir = project_dir / 'Release'
# if not release_dir.exists():
# release_dir.mkdir()
#
# # Grab the readme contents.
# doc_lines = (doc_dir / 'Readme.md').read_text().splitlines()
# # Generate a bbcode version, prefixing with custom header.
# bb_lines = header_lines + Get_BB_Text(doc_lines)
# (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\n'.join(bb_lines))
# return
if __name__ == '__main__':
Make()
| 31.967611 | 86 | 0.58498 |
be14596b5522e0877a99c1e6b243c1003263e5ff | 71 | py | Python | Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | ["MIT"] | null | null | null | Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | ["MIT"] | null | null | null | Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | ["MIT"] | 1 | 2021-04-21T10:23:08.000Z | 2021-04-21T10:23:08.000Z |
#Addition of two numbers
a = 30
b = 17
print("Sum of a and b is",a + b)
| 17.75 | 32 | 0.633803 |
be15fa91cd3274065ddb261552f8c0f2ea292fcd | 2,960 | py | Python | curso 1/04 - caixa de texto/a4.py | andersonssh/aprendendo-pyqt5 | d15ad7378d4573410c11fc39042df19048c656e4 | ["MIT"] | null | null | null | curso 1/04 - caixa de texto/a4.py | andersonssh/aprendendo-pyqt5 | d15ad7378d4573410c11fc39042df19048c656e4 | ["MIT"] | null | null | null | curso 1/04 - caixa de texto/a4.py | andersonssh/aprendendo-pyqt5 | d15ad7378d4573410c11fc39042df19048c656e4 | ["MIT"] | null | null | null |
import sys
from PyQt5.QtWidgets import (QApplication,
QMainWindow,
QPushButton,
QToolTip,
QLabel,
QLineEdit)
from PyQt5 import QtGui
if __name__ == '__main__':
app = QApplication(sys.argv)
janela = Janela()
janela.carregar_janela()
sys.exit(app.exec_())
| 31.489362 | 140 | 0.591216 |
be18b88ab1937677b7e3d5583d09538c7f91bce2 | 2,460 | py | Python | pdf2write.py | codeunik/stylus_labs_write_pdf_importer | 25d7aa037647a86284c24527bda7b222cf95bb62 | ["MIT"] | null | null | null | pdf2write.py | codeunik/stylus_labs_write_pdf_importer | 25d7aa037647a86284c24527bda7b222cf95bb62 | ["MIT"] | null | null | null | pdf2write.py | codeunik/stylus_labs_write_pdf_importer | 25d7aa037647a86284c24527bda7b222cf95bb62 | ["MIT"] | null | null | null |
import base64
import os
import sys
import PyPDF2
svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<rect id="write-doc-background" width="100%" height="100%" fill="#808080"/>
<defs id="write-defs">
<script type="text/writeconfig">
<int name="docFormatVersion" value="2" />
<int name="pageColor" value="-1" />
<int name="pageNum" value="0" />
<int name="ruleColor" value="0" />
<float name="marginLeft" value="0" />
<float name="xOffset" value="-380.701752" />
<float name="xRuling" value="0" />
<float name="yOffset" value="1536.84216" />
<float name="yRuling" value="0" />
</script>
</defs>
'''
pdf_path = sys.argv[1]
pdf = PyPDF2.PdfFileReader(pdf_path, "rb")
img_width = 720
n_pages = pdf.getNumPages() + 1
page = pdf.getPage(0)
width = page.mediaBox.getWidth()
height = page.mediaBox.getHeight()
aspect_ratio = height/width
img_height = int(aspect_ratio * img_width)
os.system('mkdir -p /tmp/pdf2write')
new_page_height = 0
for page in range(n_pages):
print(f"Processing {page}/{n_pages}", end='\r')
os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')
with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f:
base64_data = base64.b64encode(f.read()).decode('utf-8')
tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000">
<g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke">
<rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" />
</g>
<image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/>
</g>
</svg>'''
new_page_height += (img_height+10)
svg += tmp_svg
svg += '''</svg>'''
os.system('rm -rf /tmp/pdf2write')
with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f:
f.write(svg)
os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
| 37.846154 | 230 | 0.667073 |
be18cd8c90ebbd40ae9aadcbac8dd9bce504b9ec | 2,462 | py | Python | py_headless_daw/project/having_parameters.py | hq9000/py-headless-daw | 33e08727c25d3f00b2556adf5f25c9f7ff4d4304 | ["MIT"] | 22 | 2020-06-09T18:46:56.000Z | 2021-09-28T02:11:42.000Z | py_headless_daw/project/having_parameters.py | hq9000/py-headless-daw | 33e08727c25d3f00b2556adf5f25c9f7ff4d4304 | ["MIT"] | 19 | 2020-06-03T06:34:57.000Z | 2021-01-26T07:36:17.000Z | py_headless_daw/project/having_parameters.py | hq9000/py-headless-daw | 33e08727c25d3f00b2556adf5f25c9f7ff4d4304 | ["MIT"] | 1 | 2020-06-18T09:25:21.000Z | 2020-06-18T09:25:21.000Z |
from typing import Dict, List, cast
from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
| 35.681159 | 107 | 0.644598 |
be193942a6e1e90e82121a1e52ce25e1006effc3 | 488 | py | Python | wasatch/ROI.py | adiravishankara/Wasatch.PY | 058b3de2c9399e9aea6347fa360f9c7dbbf296aa | ["MIT"] | 9 | 2018-10-31T11:38:18.000Z | 2021-11-23T19:20:54.000Z | wasatch/ROI.py | adiravishankara/Wasatch.PY | 058b3de2c9399e9aea6347fa360f9c7dbbf296aa | ["MIT"] | 3 | 2018-11-01T10:28:53.000Z | 2022-03-21T17:40:05.000Z | wasatch/ROI.py | adiravishankara/Wasatch.PY | 058b3de2c9399e9aea6347fa360f9c7dbbf296aa | ["MIT"] | 4 | 2018-08-03T08:46:08.000Z | 2022-03-23T01:09:27.000Z |
##
# This class encapsulates a Region Of Interest, which may be either horizontal
# (pixels) or vertical (rows/lines).
| 27.111111 | 79 | 0.622951 |
be19a958423363abc9e04beed1c7e6d4e8b02233 | 8,562 | py | Python | examples/python/oled_ssd1327.py | whpenner/upm | 3168c61d8613da62ecc7598517a1decf533d5fe7 | ["MIT"] | 1 | 2017-09-22T01:41:30.000Z | 2017-09-22T01:41:30.000Z | bsp/intel/peripheral/libupm/examples/python/oled_ssd1327.py | Keneral/ahardware | 9a8a025f7c9471444c9e271bbe7f48182741d710 | ["Unlicense"] | null | null | null | bsp/intel/peripheral/libupm/examples/python/oled_ssd1327.py | Keneral/ahardware | 9a8a025f7c9471444c9e271bbe7f48182741d710 | ["Unlicense"] | 1 | 2018-02-24T19:09:04.000Z | 2018-02-24T19:09:04.000Z |
#!/usr/bin/python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Load i2clcd display module
import time, signal, sys
import pyupm_i2clcd as upmLCD
myLCD = upmLCD.SSD1327(0, 0x3C);
logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0,
0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E,
0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0,
0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,
0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83,
0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F,
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06,
0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3,
0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E,
0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF,
0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66,
0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3,
0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86,
0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF,
0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83,
0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66,
0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7,
0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE,
0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6,
0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00,
0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3,
0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00,
0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8,
0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E,
0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00,
0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB,
0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3,
0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00,
0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00,
0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40,
0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70,
0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C,
0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48,
0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40,
0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C,
0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48,
0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40,
0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
SeeedLogo = upmLCD.uint8Array(len(logoArr))
for x in range(len(logoArr)):
SeeedLogo.__setitem__(x, logoArr[x])
# If you don't set the display to be white, the seeed logo will appear jagged
myLCD.setGrayLevel(12)
myLCD.draw(SeeedLogo, 96 * 96 / 8);
for i in range(12):
myLCD.setCursor(i, 0)
myLCD.setGrayLevel(i)
myLCD.write('Hello World')
print "Exiting"
| 45.063158 | 77 | 0.68512 |
be1bdf6fe279b2f8b2c141f3279c61f47199ae18 | 898 | py | Python | digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py | juansdev/digital_image_processing | a0fe429c0664d81063dc76502a3e4874eea901a7 | ["MIT"] | 1 | 2022-03-22T03:37:44.000Z | 2022-03-22T03:37:44.000Z | digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py | juansdev/digital_image_processing | a0fe429c0664d81063dc76502a3e4874eea901a7 | ["MIT"] | null | null | null | digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py | juansdev/digital_image_processing | a0fe429c0664d81063dc76502a3e4874eea901a7 | ["MIT"] | null | null | null |
from .bernsen import bernsen_thresholding_method
from .bradley_roth import bradley_thresholding_method
from .contrast import contrast_thresholding_method
from .feng import feng_thresholding_method
from .gaussian import threshold_value_gaussian
from .johannsen import johannsen_thresholding_method
from .kapur import kapur_thresholding_method
from .mean import threshold_value_mean
from .minimum_error import minimum_err_thresholding_method
from .niblack import niblack_thresholding_method
from .nick import nick_thresholding_method
from .otsu import otsu_thresholding_method
from .p_tile import p_tile_thresholding_method
from .pun import pun_thresholding_method
from .rosin import rosin_thresholding_method
from .sauvola import sauvola_thresholding_method
from .singh import singh_thresholding_method
from .two_peaks import two_peaks_thresholding_method
from .wolf import wolf_thresholding_method
| 44.9 | 58 | 0.894209 |
be1d04203f18e6f16b60a723e614122b48a08671 | 1,097 | py | Python | data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | ["MIT"] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | ["MIT"] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | ["MIT"] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z |
import os
from kombu import Queue, Exchange
## Broker settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672')
#BROKER_URL = "amqp://guest:guest@localhost:5672/"
#BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379')
#BROKER_HOST = "localhost"
#BROKER_PORT = 27017
#BROKER_TRANSPORT = 'mongodb'
#BROKER_VHOST = 'celery'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', exchange=Exchange('default'), routing_key='default'),
# Queue('aws_uploads', routing_key='video.uploads'),
)
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_IMPORTS = ('celeryservice.tasks',)
#CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')
## Using the database to store task state and results.
#CELERY_RESULT_BACKEND = "mongodb"
#CELERY_MONGODB_BACKEND_SETTINGS = {
# "host": "localhost",
# "port": 27017,
# "database": "celery",
# "taskmeta_collection": "celery_taskmeta",
#}
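# For illustration only (not part of the original config): a Celery app would
# typically load this module via config_from_object, e.g.
#
#   from celery import Celery
#   app = Celery('celeryservice')
#   app.config_from_object('celeryconfig')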
| 30.472222 | 76 | 0.739289 |
be1d72eb89ee80a827a9a1150e2c759579770b36 | 21,106 | py | Python | timesheet.py | dgollub/timesheet-google-thingy | 3ffab402444dba520ff3416b2327f6d2ceeeac39 | ["MIT"] | null | null | null | timesheet.py | dgollub/timesheet-google-thingy | 3ffab402444dba520ff3416b2327f6d2ceeeac39 | ["MIT"] | null | null | null | timesheet.py | dgollub/timesheet-google-thingy | 3ffab402444dba520ff3416b2327f6d2ceeeac39 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
#
from __future__ import print_function
import csv
import os
import re
import sys
import arrow
from gsheets import Sheets
CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DEBUG = os.environ.get('DEBUG', "0") == "1"
AS_CSV = os.environ.get('CSV', "0") == "1"
COL_DATE = 0
COL_WEEKDAY = 1
COL_TIME_START = 2
COL_TIME_END = 3
COL_LUNCH = 4
COL_TIME = 5 # includes lunch
COL_TIME_FIXED = 6 # does not include lunch
COL_MOVE = 7
COL_WORK_FROM_HOME = 8
COL_NOTES = 9
COL_TASKS_START = 10
SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"]
SATURDAY = 5
SUNDAY = 6
if __name__ == "__main__":
main()
| 34.6 | 158 | 0.588032 |
be1da4c3a9cd8b6f92a68b6f9d9dd0277f9d55ce | 7,578 | py | Python | league/game.py | Orpheon/All-in | 016901953904250226f388422318ef2f739bf82e | ["MIT"] | null | null | null | league/game.py | Orpheon/All-in | 016901953904250226f388422318ef2f739bf82e | ["MIT"] | null | null | null | league/game.py | Orpheon/All-in | 016901953904250226f388422318ef2f739bf82e | ["MIT"] | null | null | null |
import numpy as np
import pickle
import treys
import constants
FULL_DECK = np.array(treys.Deck.GetFullDeck())
| 44.05814 | 133 | 0.684613 |
be1dddb28d3c0ea4aa8ef940a579e9c73af88093 | 2,487 | py | Python | cms/admin/views.py | miloprice/django-cms | c6f548f0983a7488609e07a57552b47675d8d78e | ["BSD-3-Clause"] | null | null | null | cms/admin/views.py | miloprice/django-cms | c6f548f0983a7488609e07a57552b47675d8d78e | ["BSD-3-Clause"] | null | null | null | cms/admin/views.py | miloprice/django-cms | c6f548f0983a7488609e07a57552b47675d8d78e | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404
| 36.573529 | 101 | 0.62686 |
be1f1730e3c83173cbfa65bc65d2316eb598bfbe | 4,127 | py | Python | delete.py | lvwuyunlifan/crop | 7392d007a8271ff384c5c66ed5717afbc4172b4d | ["Apache-2.0"] | null | null | null | delete.py | lvwuyunlifan/crop | 7392d007a8271ff384c5c66ed5717afbc4172b4d | ["Apache-2.0"] | null | null | null | delete.py | lvwuyunlifan/crop | 7392d007a8271ff384c5c66ed5717afbc4172b4d | ["Apache-2.0"] | null | null | null |
import os
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# import seaborn as sns
import pandas as pd
import numpy as np
import random
train_path = './AgriculturalDisease_trainingset/'
valid_path = './AgriculturalDisease_validationset/'
if __name__ == '__main__':
genImage(train_path, 'train')
genImage(valid_path, 'valid')
| 35.886957 | 83 | 0.557063 |
be1f5618419f3d6206980e4841ac306ca5a5ac13 | 854 | py | Python | 数据分析/matplotlib/03.demo.py | likedeke/python-spider-study | 09bee3cbe833234a86efcc28d62ace000e2fbb4b | ["Apache-2.0"] | 1 | 2021-08-20T11:47:51.000Z | 2021-08-20T11:47:51.000Z | 数据分析/matplotlib/03.demo.py | likedeke/python-spider-study | 09bee3cbe833234a86efcc28d62ace000e2fbb4b | ["Apache-2.0"] | null | null | null | 数据分析/matplotlib/03.demo.py | likedeke/python-spider-study | 09bee3cbe833234a86efcc28d62ace000e2fbb4b | ["Apache-2.0"] | null | null | null |
# - - - - - - - - - - -
# @author like
# @since 2021-02-23 11:08
# @email [email protected]
#
from matplotlib import pyplot as plt
from matplotlib import rc
from matplotlib import font_manager
import random
x = range(0, 120)
y = [random.randint(20, 35) for i in range(120)]
plt.figure(figsize=(20, 8), dpi=80)
plt.plot(x, y)
# use a font that can display CJK characters (SimHei)
chFont = font_manager.FontProperties(family="SimHei")  # SimHei
# chFont = font_manager.FontProperties(fname="C:/Windows/Fonts/SIMHEI.TTF")
# x-axis ticks: one label every 10 minutes, formatted as "hour,minute"
step = 10
xLabels = ["10,{}".format(i) for i in range(60)]
xLabels += ["11,{}".format(i) for i in range(60)]
plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontproperties=chFont)
# axis labels and title (English stand-ins; the original label strings were in Chinese)
plt.xlabel("Time", fontproperties=chFont)
plt.ylabel("Temperature (°C)", fontproperties=chFont)
plt.title("Temperature per minute, 10:00 to 12:00", fontproperties=chFont)
plt.show()
| 23.722222 | 80 | 0.696721 |
be1f96521bb4c93e3fbc514880ddde1a151dfa0d | 1,351 | py | Python | testing/vcs/test_vcs_isoline_labels.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | ["BSD-3-Clause"] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | testing/vcs/test_vcs_isoline_labels.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | ["BSD-3-Clause"] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | testing/vcs/test_vcs_isoline_labels.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | ["BSD-3-Clause"] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z |
import os, sys, cdms2, vcs, vcs.testing.regression as regression
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
isoline = canvas.createisoline()
isoline.label="y"
texts=[]
colors = []
for i in range(10):
text = canvas.createtext()
text.color = 50 + 12 * i
text.height = 12
colors.append(100 + 12 * i)
if i%2 == 0:
texts.append(text.name)
else:
texts.append(text)
isoline.text = texts
# First test using isoline.text[...].color
canvas.plot(data, isoline, bg=1)
baseline = os.path.splitext(sys.argv[1])
baselineImage = "%s%s"%baseline
ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage)
# Now set isoline.linecolors and test again.
canvas.clear()
isoline.linecolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels2.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
# Now set isoline.textcolors and test again.
canvas.clear()
isoline.textcolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels3.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
sys.exit(ret)
| 29.369565 | 87 | 0.721688 |
be204f98e2c8943df601cdf5f75bb96f08fc6392 | 34,671 | py | Python | src/Python_version/ICE_py36.py | ds-utilities/ICE | 9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f | ["MIT"] | 2 | 2019-08-05T08:26:38.000Z | 2020-05-16T14:10:00.000Z | src/Python_version/ICE_py36.py | postyear/ICE | 9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f | ["MIT"] | null | null | null | src/Python_version/ICE_py36.py | postyear/ICE | 9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f | ["MIT"] | 2 | 2020-05-16T14:10:01.000Z | 2021-02-09T20:05:46.000Z |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
% the random walk algorithm.
% A is the input net matrix, with the diag to be 0.
% nSteps: how many steps to walk
    % laziness: the probability to go back.
% p0: the initial probability. usually it is a zero matrix with the diag to
% be 1.
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
    % Each column represents the probability for each node. Each element in the
% column means the probability to go to that node.
% This algorithm will converge. For example, for the above matrix, nSteps =
% 100, 1000 or 10000, will give the same result.
'''
n = len(A)
    if p0 is None:
p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
# test
#dst = distance.euclidean(A)
# corrent, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
# delete the diagnal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
# make it a diagonal matrix
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
% on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just randomly picked some nodes as centroids, that
    % would not be good for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
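def f_eu_dist_vectorized(X):
    '''
    Sketch of an equivalent vectorized form (added for illustration, not used
    by the original code): scipy's cdist builds the same pairwise Euclidean
    matrix in a single call.
    '''
    sim = -distance.cdist(X, X, metric='euclidean')
    np.fill_diagonal(sim, 0)
    return sim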
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
The return variable clus stores the instance indices for each cluster.
    However, this structure does not make it easy to look up, for a given
    instance, which clusters it belongs to, so callers also convert clus to a
    true-false matrix (f_clus_to_tfs; a sketch appears after the tests below).
'''
    if each_clus_sz is None:
        # on average, how many clusters one inst belongs to.
        #overlap_factor = 2;
        # the estimated size of each cluster. default is one third of the
        # number of instances.
        each_clus_sz = len(X) / 3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
# % set the diagnal to 1
np.fill_diagonal(rw_net, True)
clus = []
for i in range(0, k):
tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
clus.append(tmp)
# ---------------------------------------------------------------
# % sort the clusters
lens = f_len_of_each_ele(clus)
ix = np.argsort(lens)[::-1]
clus_ordered = [clus[i] for i in ix]
print(' center inst. index of each cluster: ')
ixs_centers = np.array(ixs_centers)
print(ixs_centers[ix])
print(' size of each cluster: ')
print(lens[ix])
print(' done RWR clustering')
return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass
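# f_clus_to_tfs itself is not shown in this excerpt; a minimal sketch of the
# conversion it is expected to perform (an assumption, not the original code):
def f_clus_to_tfs_sketch(clus, n_inst):
    # boolean matrix: rows are instances, columns are clusters
    tfs = np.zeros((n_inst, len(clus)), dtype=bool)
    for j, members in enumerate(clus):
        tfs[members, j] = True
    return tfs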
def f_tfs_2_instClus(tfs):
'''
    convert the boolean table representation of the clustering result into,
    for each instance, the list of clusters it belongs to.
'''
inst_clus = []
for i in range(0, len(tfs)):
row = list( np.where(tfs[i, :] ) [0] )
inst_clus.append(row)
return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''
#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
# '''
# corresponds to f_weka_bg_svm_tr_te() in Matlab version
# '''
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
def f_tr_te(X_tr, y_tr, X_te, model):
'''
corresponds to f_weka_bg_svm_tr_te() in Matlab version
'''
#bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
y_pred = model_inner.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
#auc = roc_auc_score(y_te.flatten(), y_pred)
return y_pred
def f_k_fo(X, y, model, k_fold=10):
'''
corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
'''
y = y.flatten()
y_pred = np.zeros(y.size)
skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
skf.get_n_splits(X, y)
for train_index, test_index in skf.split(X, y):
#print("TRAIN: ", train_index, " TEST: ", test_index)
X_tr, X_te = X[train_index], X[test_index]
#y_tr, y_te = y[train_index], y[test_index]
y_tr = y[train_index]
if np.unique(y_tr).size == 1:
y_pred_fo = np.zeros( len(test_index) )
#print len(X_te)
#print len(test_index)
#print y_pred_fo
y_pred_fo.fill(np.unique(y_tr)[0] )
#print y_pred_fo
else:
y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
y_pred[test_index] = y_pred_fo
#auc = roc_auc_score(y.flatten(), y_pred)
return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
def f_quantileNorm(templete, target):
'''
Templete is the standard, change the target to the values in the templete.
Target may have a very different range than the templete.
templete and target should be 1d n by 1 array.
f_my_quantileNorm()
'''
ix_target = np.argsort(target, kind='mergesort')
ix_templete = np.argsort(templete, kind='mergesort')
target[ix_target] = templete[ix_templete]
new = target
return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
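# Worked example with made-up numbers (not from the original tests): the target
# keeps its ordering but takes on the templete's values.
#   f_quantileNorm(np.array([0.1, 0.5, 0.9]), np.array([10., 30., 20.]))
#   -> array([0.1, 0.9, 0.5])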
#def f_bg_k_fo_3(X, y, k_fold=10):
# '''
# corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
# corresponds to f_k_fo()
# '''
# y_pred = np.zeros((y.size, 1))
#
# skf = StratifiedKFold(n_splits=k_fold)
# skf.get_n_splits(X, y)
#
# for train_index, test_index in skf.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# X_tr, X_te = X[train_index], X[test_index]
# y_tr, y_te = y[train_index], y[test_index]
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
'''
% using each cluster data to predict the whole instances, while self
% prediction using 10-fold CV.
corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
'''
n_clusters = len(clus)
y_pred_multi = np.zeros((y.size, n_clusters) )
models = []
for j in range(0, n_clusters):
# for each cluster
Xj = X[clus[j].flatten(), :]
yj = y[clus[j].flatten() ]
model_a_clust = copy.deepcopy(model)
print(' Cluster '+str(j)+' started...')
#if len(yj) > 10:
if len(yj) > 15 and np.unique(yj).size != 1:
# ------------------ for self ------------------
#if np.unique(yj).size == 1:
# y_pred = np.zeros(yj.size)
# y_pred.fill(np.unique(yj)[0])
#else:
try:
y_pred = f_k_fo(Xj, yj, model, fo_inner)
# quantileNorm
templete = y_pred_whole[clus[j].flatten()]
target = y_pred
y_pred = f_quantileNorm(templete, target)
# copy the normed prediction to the whole data.
y_pred_multi[clus[j].flatten(), j] = y_pred
print(' c-'+str(j)+' done predicting local instances')
# ------------------ for other -----------------
ix_other = set(range(0, y.size)) - set(clus[j].flatten())
ix_other = list(ix_other)
#print ix_other
X_other = X[ix_other , :]
#y_other = y[ix_other ]
# predict
#y_pred = f_tr_te(Xj, yj, X_other, model)
#if np.unique(yj).size != 1:
model_a_clust.fit(Xj, yj)
y_pred = model_a_clust.predict_proba(X_other)
y_pred = y_pred[:, 1].flatten()
# quantileNorm
templete = y_pred_whole[ix_other]
target = y_pred
y_pred = f_quantileNorm(templete, target)
#else:
# y_pred = np.zeros(X_other.size)
# y_pred.fill(np.unique(yj)[0])
# copy to the whole array
y_pred_multi[ix_other, j] = y_pred
print(' c-'+str(j)+' done predicting remote instances')
except ValueError as e:
print(e)
print(' skip this cluster')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
else:
if len(yj) <= 15:
print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
if np.unique(yj).size == 1:
print (' warning, #unique class label(s) == 1')
y_pred = np.zeros(y.size)
y_pred.fill(np.unique(yj)[0])
y_pred_multi[:, j] = y_pred
model_a_clust = np.unique(yj)[0]
models.append(model_a_clust)
return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
#def f_dec_tab_4_bg_svm(X, y, clus):
# '''
# Calculate the decision table
# % This version changed from the cluster-cluster dec_mat to instance-cluster
# % dec_mat. This solution will avoid the case that if one cluster decision
# % is wrong leading entrie cluster prediction is wrong, which is the reason
# % of instability. However, we cannot use a systematic evaluation criteria
# % such as AUC, I will try using the predicted prob at first.
#
# % This version 3 adds the support for fuzzy clustering - one instance may
# % belongs to more than one cluster.
# % This updated version also outputs the predicted values of y.
# % support more than 3 clusters
# % normalization take place in y_pred_self and y_pred_other, thus do not
# % need normalization when predict y_pred_ICE.
# % ixsp is another cluster form.
#
# corresponds to f_dec_tab_4_bg_svm() in Matlab version
# '''
# #n_clusters = len(clus)
# ## dec_mat stores the prediction error.
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# #
# ## k_fold of inner cross-validation
# #fo_inner = 10
# # --------------------------- WHOLE -------------------------
#
# # --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
'''
Calculate the decision table
corresponds to f_dec_tab_4_bg_svm() in Matlab version
'''
n_clusters = len(clus)
# err_mat stores the prediction error.
pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# col 0 to col n_clusters-1 store the predictions by each cluster
# the last col stores the pred by whole data
#models = []
# k_fold of inner cross-validation
fo_inner = 5
# --------------------------- WHOLE -------------------------
# Predict each cluster using the whole data.
model_whole = copy.deepcopy(model)
y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
model_whole.fit(X, y) # fit a model using all data rather than only a fold
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
# predict the whole instances using each cluster data, while self
# prediction using 10-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]
"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
# '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
'''
Convert the err table to decision table.
'''
dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
# dec_ixs: for each instance, which clusters should be used.
dec_ixs = []
inst_clus = f_tfs_2_instClus(tfs)
for i in range(0, len(err_mat)):
# Matlab code:
#dec_row = dec_mat(cur_nb_ix, :);
#dec_row(:, end ) = dec_row(:, end ) - adv_whole;
#dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
row = np.copy( err_mat[i, :] )
#print row
row[-1] = row[-1] - adv_whole
inst_i_clus = inst_clus[i]
if len(inst_i_clus) > 0:
row[inst_i_clus] = row[inst_i_clus] - adv_self
#print row
ix_good_clus = list( np.where( row < row[-1] ) [0] )
#print ix_good_clus
if len(ix_good_clus) > 0:
dec_mat[i, ix_good_clus] = True
dec_ixs.append(ix_good_clus)
else:
dec_ixs.append([])
return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
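# Sketch of the expected output (derived from the function above, values hypothetical):
# dec_mat is a boolean array of shape (n_instances, n_clusters) marking, for each
# instance, which cluster models beat the whole-data model once the adv_whole and
# adv_self handicaps are applied; dec_ixs holds the same picks as a list of index lists.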
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
'''
Use the training data to predict the testing data:
first predict with the model trained on the whole training data, then with
the model trained on each cluster of the training data.
'''
y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
# the first col is the prediction using the whole data
model_whole = models[-1]
y_pred_all[:, 0] = f_te(X_te, model_whole)
#y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
#print 'whole model good '
# start from the second col, the result is by each cluster
for i in range(0, len(clus)):
#Xi = X_tr[clus[i].flatten(), :]
#yi = y_tr[clus[i].flatten() ]
model_i = models[i]
#model_a_clust = copy.deepcopy(model)
try:
y_pred_te = f_te(X_te, model_i)
except :
if model_i == 0:
y_pred_te = np.zeros(len(X_te))
elif model_i == 1:
y_pred_te = np.ones(len(X_te))
else:
y_pred_te = np.zeros(len(X_te))
y_pred_te.fill(np.nan)
#except NotFittedError as e:
# print(repr(e))
# y_pred_te = np.zeros(len(X_te))
# y_pred_te.fill(np.nan)
#print 'model '+str(i)+' good '
#y_pred_te = f_tr_te(Xi, yi, X_te, model)
if doNorm == True:
templete = y_pred_all[:, 0]
target = y_pred_te
y_pred = f_quantileNorm(templete, target)
else:
y_pred = y_pred_te
y_pred_all[:, i+1] = y_pred
return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)
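# Layout of the result (a sketch based on the code above): y_pred_all has shape
# (len(X_te), len(clus) + 1); column 0 holds the whole-data model's predictions and
# column i+1 holds cluster i's predictions, quantile-normalised against column 0
# when doNorm is True.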
def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
Fit the ICE model: fuzzy-cluster X_tr (rwr-based), train one model per
cluster plus a whole-data model, and build the decision table (dec_ixs)
used at prediction time.
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
print (' Done calculating decision table')
return [clus, models, dec_ixs]
#def f_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''
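# A minimal sketch (an assumption, not part of the original pipeline) of the mean
# imputation hinted at by the commented-out helper above: replace NaNs in each
# column of a 2-D array by that column's mean.
def f_deal_miss_v_1(d):
    '''Replace NaNs in each column of the 2-D array d by the column mean (sketch).'''
    col_means = np.nanmean(d, axis=0)
    inds = np.where(np.isnan(d))
    d[inds] = np.take(col_means, inds[1])
    return d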
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
This version uses the err_mat to re-cluster the instances before building the final decision table.
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# ******************** re-clustering ********************
n_iter = 2
for i in range(0, n_iter):
clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
tfs = f_clus_to_tfs(clus, len(X_tr))
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# *******************************************************
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
print (' Done calculating decision table')
return [clus, models, dec_ixs]
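# A sketch of what the re-clustering above is for (an assumption): repeating the
# clustering on the error matrix groups together instances that the same models
# tend to mispredict, before the final decision table is built.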
def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
'''
clus and inst_clus contain the same information: clus lists the instance
ids belonging to each cluster, while inst_clus stores, for each instance,
which cluster(s) it belongs to.
dec_ixs stores the good cluster(s) for each instance, which may include
even a remote cluster; an entry in dec_ixs never covers the whole set of
instances (the whole-data model is handled separately).
'''
# the first col is the prediction using the whole data
# start from the second col, the result is by each cluster
y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
y_pred_ICE = np.zeros( len(X_te) )
neighbour_mat = f_eu_dist2(X_tr, X_te)
# ---------- for each testing instance ----------
#n_partials = np.zeros( len(X_te) )
#n_wholes = np.zeros( len(X_te) )
for j in range(0, len(X_te) ):
# for each testing instance
# find the top 10 neighbors for each test instance
neighbour_col = neighbour_mat[:, j].flatten()
ix = np.argsort(neighbour_col )
ix = ix[::-1]
ix_top_neighbors = ix[0:N]
#print 'testing inst ' + str(j)
#print ' ix of top neighbors:'
#print ix_top_neighbors
# ---------- find all neighbors' picks ----------
clus_ids_to_use = []
nei_labels = []
for cur_nb in range(0, N):
# for each neighbour
# find each neighbour's pick
cur_nb_ix = ix_top_neighbors[cur_nb]
clus_id_to_use = list( dec_ixs[cur_nb_ix] )
clus_ids_to_use = clus_ids_to_use + clus_id_to_use
# also find neighbor's label. maybe will be used later as KNN pred
# instead of using whole to pred.
nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
#print ' clus_ids_to_use:'
#print clus_ids_to_use
# cluster id + 1 to make the ix fit the col id in y_pred_all
a = clus_ids_to_use
a = list( np.array(a) + 1 )
clus_ids_to_use = a
# number of partial models used
n_partial = len(clus_ids_to_use)
# number of whole models used, based on parameters alpha, beta and N.
n_whole = int( round( alpha*n_partial + beta*N ) )
clus_ids_to_use = clus_ids_to_use + [0] * n_whole
#print ' clus_ids_to_use:'
#print clus_ids_to_use
#print nei_labels
y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
print ('Done predicting testing instances.')
return y_pred_ICE
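# Worked example of the whole-vs-partial weighting above (assumed values):
# with N=5 neighbours, alpha=1, beta=1 and 3 partial cluster picks,
# n_whole = round(1*3 + 1*5) = 8, so column 0 (the whole-data model) enters the
# nanmean 8 times alongside the 3 partial-model columns.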
# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
try:
X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
model = svm.SVC(kernel='linear', probability = True)
skf = StratifiedKFold(n_splits=k_fold)
skf.get_n_splits(X, y)
y_preds_ICE = np.zeros( y.size )
y_preds_whole = np.zeros( y.size )
fold_i = 1
for train_index, test_index in skf.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_tr, X_te = X[train_index], X[test_index]
y_tr, y_te = y[train_index], y[test_index]
[clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
y_preds_ICE[test_index] = y_pred_ICE
y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
y_preds_whole[test_index] = y_pred_whole
print( j)
print( 'fold ' + str(fold_i) + ' finished')
fold_i = fold_i + 1
auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
print (auc_ICE, auc_whole)
aucs_ICE.append(auc_ICE)
aucs_whole.append(auc_whole)
f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
except:
continue
| 31.348101 | 93 | 0.551123 |
be20c61ee255e8ce67c5713e68e8dff144cc5ef4
| 44,105 |
py
|
Python
|
xc/common/utils/prjxray_routing_import.py
|
FireFox317/symbiflow-arch-defs
|
f0e7b4212544e1d55da776fb7a2ff79117e01454
|
[
"ISC"
] | 1 |
2020-09-23T17:57:07.000Z
|
2020-09-23T17:57:07.000Z
|
xc/common/utils/prjxray_routing_import.py
|
tcal-x/symbiflow-arch-defs
|
1e513ac778371608c51fa86a98e54279e3c74752
|
[
"ISC"
] | null | null | null |
xc/common/utils/prjxray_routing_import.py
|
tcal-x/symbiflow-arch-defs
|
1e513ac778371608c51fa86a98e54279e3c74752
|
[
"ISC"
] | null | null | null |
#!/usr/bin/env python3
""" Imports 7-series routing fabric to the rr graph.
For ROI configurations, this also connects the synthetic IO tiles to the routing
node specified.
Rough structure:
Add rr_nodes for CHANX and CHANY from the database. IPIN and OPIN rr_nodes
should already be present from the input rr_graph.
Create a mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY
rr_node ids in the rr_graph.
Add rr_edge for each row in the graph_edge table.
Import channel XML node from connection database and serialize output to
rr_graph XML.
"""
import argparse
import os.path
from hilbertcurve.hilbertcurve import HilbertCurve
import math
import prjxray.db
from prjxray.roi import Roi
import prjxray.grid as grid
from lib.rr_graph import graph2
from lib.rr_graph import tracks
from lib.connection_database import get_wire_pkey, get_track_model
import lib.rr_graph_capnp.graph2 as capnp_graph2
from prjxray_constant_site_pins import feature_when_routed
from prjxray_tile_import import remove_vpr_tile_prefix
import simplejson as json
from lib import progressbar_utils
import datetime
import re
import functools
import pickle
import sqlite3
now = datetime.datetime.now
HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+')
CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')
CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)')
CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*')
BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+')
BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+')
CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+')
HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)')
IOI_OCLK = re.compile('IOI_OCLK_([01])')
# Regex for [LR]IOI_SING tiles
IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_']
IOI_SING_REGEX = re.compile(
r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\.IOI_)({})([01])(.*)'.format(
"|".join(IOI_SITE_PIPS)
)
)
def reduce_connection_box(box):
""" Reduce the number of connection boxes by merging some.
Examples:
>>> reduce_connection_box('IMUX0')
'IMUX'
>>> reduce_connection_box('IMUX1')
'IMUX'
>>> reduce_connection_box('IMUX10')
'IMUX'
>>> reduce_connection_box('BRAM_ADDR')
'IMUX'
>>> reduce_connection_box('A_L10')
'A'
>>> reduce_connection_box('B')
'B'
>>> reduce_connection_box('B_L')
'B'
"""
box = CONNECTION_BOX_FILTER.match(box).group(1)
if 'BRAM_ADDR' in box:
box = 'IMUX'
if box.endswith('_L'):
box = box.replace('_L', '')
return box
REBUF_NODES = {}
REBUF_SOURCES = {}
def get_clk_hrow_and_rebuf_tiles_sorted(cur):
"""
Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles.
returns them in a list sorted according to their Y coordinates.
"""
cur.execute(
"""
SELECT name
FROM phy_tile
WHERE
name LIKE "CLK_HROW_BOT_R_%"
OR
name LIKE "CLK_HROW_TOP_R_%"
OR
name LIKE "CLK_BUFG_REBUF_%"
ORDER BY grid_y DESC;
"""
)
return [t[0] for t in cur.fetchall()]
HCLK_CMT_TILES = {}
def check_feature(feature):
""" Check if enabling this feature requires other features to be enabled.
Some pips imply other features. Example:
.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10
implies:
.ENABLE_BUFFER.HCLK_CK_BUFHCLK10
"""
# IOI_SING tiles have bits in common with the IOI tiles.
#
# The difference is that the TOP IOI_SING tile shares bits with
# the bottom half of a normal IOI tile, while the BOTTOM IOI_SING
# shares bits with the top half of a normal IOI TILE.
#
# The following is to change the edge feature to accommodate this
# need, as the IOI_SING tiles have the same wire and pip names
# regardless of whether they are found on the TOP or BOTTOM of an IOI column
m = IOI_SING_REGEX.fullmatch(feature)
if m:
# Each clock region spans a total of 50 IOBs.
# The IOI_SING tiles are found on top or bottom of the whole
# IOI/IOB column. The Y coordinate identified with the
# second capture group is taken modulo 50 to get the relative
# position of the IOI_SING within the clock region column
is_bottom_sing = int(m.group(2)) % 50 == 0
# This is the value to attach to the source pip name that
# changes based on which IOI_SING is selected (top or bottom)
#
# Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1
src_value = '1' if is_bottom_sing else '0'
# This is the value to attach to the IOI_SITE_PIPS names
# in the destination wire of the pip
#
# Example: IOI_OLOGIC0 -> IOI_OLOGIC1
dst_value = '0' if is_bottom_sing else '1'
unchanged_feature = "{}{}{}{}".format(
m.group(1), m.group(2), m.group(3), m.group(4)
)
src_wire = m.group(6).replace('_SING', '')
for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']:
if pip in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(src_value))
if 'IOI_OCLK' in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(dst_value))
changed_feature = "{}{}".format(dst_value, src_wire)
feature = "{}{}".format(unchanged_feature, changed_feature)
feature_path = feature.split('.')
# IOB_DIFFO_OUT0->IOB_DIFFO_IN1
#
# When this PIP is active the IOB operates in the differential output mode.
# There is no feature associated with that PIP in the prjxray db but there
# is a tile-wide feature named "DIFF_OUT".
#
# The "DIFF_OUT" cannot be set in the architecture as it is defined one
# level up in the hierarchy (it's tile-wide, not site-wide). So here we
# map the PIP's feature to "DIFF_OUT"
if feature_path[2] == "IOB_DIFFO_OUT0" and \
feature_path[1] == "IOB_DIFFO_IN1":
return '{}.OUT_DIFF'.format(feature_path[0])
# IOB_PADOUT0->IOB_DIFFI_IN1
# IOB_PADOUT1->IOB_DIFFI_IN0
#
# These connections are hard wires that connect IOB33M and IOB33S sites.
# They are used in differential input mode.
#
# Vivado does not report this connection as a PIP but in the prjxray db it
# is a pip. Instead of making it a pseudo-pip we simply reject fasm
# features here.
if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1":
return ''
if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0":
return ''
# REBUF stuff
rebuf_key = (feature_path[0], feature_path[1])
if rebuf_key in REBUF_SOURCES:
return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]])
m = IOI_OCLK.fullmatch(feature_path[1])
if m:
enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format(
feature_path[0], m.group(1), feature_path[-1]
)
return ' '.join((feature, enable_oclkm_feature))
if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]):
enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_buffer_feature))
# BUFHCE sites are now routed through, without the need to place them; therefore,
# when the relative pip is traversed, the correct fasm feature needs to be added.
# The relevant features are:
# - IN_USE: to enable the BUFHCE site
# - ZINV_CE: to disable the inverter on CE input which is connected to VCC.
# This sets the CE signal to constant 1
m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])
if m:
x_loc_str = m.group(1)
if 'L' in x_loc_str:
x_loc = 0
elif 'R' in x_loc_str:
x_loc = 1
else:
assert False, "Impossible to determine X location of BUFHCE"
y_loc = m.group(2)
bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc)
enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format(
feature_path[0], bufhce_loc
)
enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format(
feature_path[0], bufhce_loc
)
return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce))
if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]):
features = [feature]
features.append(
'{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1])
)
features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1]))
return ' '.join(features)
m = HCLK_OUT.fullmatch(feature_path[-1])
if m:
return ' '.join(
[feature] + find_hclk_cmt_hclk_feature(
feature_path[0], m.group(1), m.group(2)
)
)
m = CASCOUT_REGEX.fullmatch(feature_path[-2])
if m:
enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format(
feature_path[0], m.group(1)
)
return ' '.join((feature, enable_cascout))
parts = feature.split('.')
wire_feature = feature_when_routed(parts[1])
if wire_feature is not None:
return '{} {}.{}'.format(feature, parts[0], wire_feature)
return feature
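# A hedged illustration of the BUFHCE branch above (the tile and wire names are
# hypothetical): a feature whose last path element is CLK_HROW_CK_MUX_OUT_L2 maps to
# x_loc=0, y_loc=2, so in addition to the original feature the function emits
# <tile>.BUFHCE.BUFHCE_X0Y2.IN_USE and <tile>.BUFHCE.BUFHCE_X0Y2.ZINV_CE=1'b1.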
# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)
PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$')
def set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
):
""" Assign a connection box to an IPIN node. """
node_dict = graph.nodes[node_idx]._asdict()
node_dict['connection_box'] = graph2.ConnectionBox(
x=grid_x,
y=grid_y,
id=box_id,
site_pin_delay=site_pin_delay,
)
graph.nodes[node_idx] = graph2.Node(**node_dict)
def update_connection_box(
conn, graph, graph_node_pkey, node_idx, connection_box_map
):
""" Update connection box of IPIN node if needed. """
cur = conn.cursor()
cur.execute(
"""
SELECT connection_box_wire_pkey
FROM graph_node WHERE pkey = ?""", (graph_node_pkey, )
)
connection_box_wire_pkey = cur.fetchone()[0]
if connection_box_wire_pkey is not None:
cur.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT phy_tile_pkey FROM wire WHERE pkey = ?
)""", (connection_box_wire_pkey, )
)
grid_x, grid_y = cur.fetchone()
cur.execute(
"SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?",
(connection_box_wire_pkey, )
)
wire_in_tile_pkey = cur.fetchone()[0]
box_id = connection_box_map[wire_in_tile_pkey]
cur.execute(
"""
SELECT switch.intrinsic_delay
FROM switch
WHERE pkey = (
SELECT site_pin_switch_pkey
FROM wire_in_tile
WHERE pkey = (
SELECT wire_in_tile_pkey
FROM wire
WHERE pkey = (
SELECT site_wire_pkey
FROM node
WHERE pkey = (
SELECT node_pkey
FROM graph_node
WHERE pkey = ?
)
)
)
)""", (graph_node_pkey, )
)
site_pin_delay = cur.fetchone()[0]
set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
)
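# Sketch of the lookup chain in the nested query above: graph_node -> node ->
# site_wire -> wire_in_tile -> site_pin_switch -> switch.intrinsic_delay, i.e. the
# connection box stores the intrinsic delay of the site pin behind this IPIN.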
def create_connection_boxes(conn, graph):
""" Assign connection box ids for all connection box types. """
cur = conn.cursor()
cur.execute(
"""
SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN (
SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN (
SELECT connection_box_wire_pkey FROM graph_node
WHERE connection_box_wire_pkey IS NOT NULL
)
);"""
)
connection_box_map = {}
for wire_in_tile_pkey, tile_type_pkey, wire_name in cur:
connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box(
reduce_connection_box(wire_name)
)
return connection_box_map
def phy_grid_dims(conn):
""" Returns physical grid dimensions. """
cur = conn.cursor()
cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;")
x_max = cur.fetchone()[0]
cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;")
y_max = cur.fetchone()[0]
return x_max + 1, y_max + 1
def find_constant_network(graph):
""" Find VCC and GND tiles and create synth_tiles input.
All arches should have these synthetic tiles, search the input rr graph
for the SYN-GND and SYN-VCC tiles.
"""
block_types = {}
for block_type in graph.block_types:
block_types[block_type.name] = block_type.id
assert 'SYN-GND' in block_types
assert 'SYN-VCC' in block_types
gnd_block_id = block_types['SYN-GND']
vcc_block_id = block_types['SYN-VCC']
gnd_loc = None
vcc_loc = None
for grid_loc in graph.grid:
if gnd_block_id == grid_loc.block_type_id:
assert gnd_loc is None
gnd_loc = (grid_loc.x, grid_loc.y)
if vcc_block_id == grid_loc.block_type_id:
assert vcc_loc is None
vcc_loc = (grid_loc.x, grid_loc.y)
assert gnd_loc is not None
assert vcc_loc is not None
synth_tiles = {
'tiles':
{
"VCC":
{
'loc':
vcc_loc,
'pins':
[
{
'wire': 'VCC',
'pad': 'VCC',
'port_type': 'VCC',
'is_clock': False,
},
],
},
"GND":
{
'loc':
gnd_loc,
'pins':
[
{
'wire': 'GND',
'pad': 'GND',
'port_type': 'GND',
'is_clock': False,
},
],
},
}
}
return synth_tiles
if __name__ == '__main__':
main()
| 30.375344 | 90 | 0.590114 |
be20fd972c9533d7359e606c8ff9c31f5c519ad2
| 17,854 |
py
|
Python
|
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
|
andrewbowen19/ClusterEclipsingBinaries
|
e554cb6bb613e0d3703314e50fcf5289f50bf572
|
[
"MIT"
] | null | null | null |
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
|
andrewbowen19/ClusterEclipsingBinaries
|
e554cb6bb613e0d3703314e50fcf5289f50bf572
|
[
"MIT"
] | null | null | null |
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
|
andrewbowen19/ClusterEclipsingBinaries
|
e554cb6bb613e0d3703314e50fcf5289f50bf572
|
[
"MIT"
] | null | null | null |
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate file
# Doing this so we don't have to run analyse each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
#similar to field, but limiting by the hard-soft boundary
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
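# Sketch of the normalisation above: intNorm is the fraction of the Raghavan
# log-normal period distribution below log10(365*10) ~ 3.56 (periods under 10 yr),
# and dividing counts by it scales the period-limited sample back up to the full
# distribution.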
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; 3 files for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
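# Sketch of the two corrections above: fb is the binary fraction averaged over the
# observed primary-mass histogram, then multiplied by the fraction of the period
# distribution below the hard-soft boundary Phs, so Nmult only counts binaries
# expected to survive as hard (bound) systems in the cluster.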
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
for filt in filters:
key = filt+'LSS_PERIOD'
if (filt == 'all'):
key = 'LSM_PERIOD'
fullP = abs(data[key] - data['p'])/data['p']
halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
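# Recovery criterion used below: a system counts as recovered when the estimated
# period is within Pcut (10%) of the true period, or of half or twice the true
# period (the common period aliases).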
rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
Nrec = len(rec.index)
#I'd like to account for all filters here to have more accurate numbers
recCombined = recCombined.append(rec)
prsaRecCombined = prsaRecCombined.append(prsaRec)
# Going to use prsaRecCombined for ecc-p plots to account for all filters
eccRec.append(prsaRec['e'].values)
pRec.append(prsaRec['p'].values)
if (filt == 'all'):
recCombined.drop_duplicates(inplace=True)
prsaRecCombined.drop_duplicates(inplace=True)
if (Nrec >= Nlim):
m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
ehRec0, eb = np.histogram(rec["e"], bins=ebins)
lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
dhRec0, db = np.histogram(rec["d"], bins=dbins)
maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
m1hRec[filt] += m1hRec0/Nall*Nmult
qhRec[filt] += qhRec0/Nall*Nmult
ehRec[filt] += ehRec0/Nall*Nmult
lphRec[filt] += lphRec0/Nall*Nmult
dhRec[filt] += dhRec0/Nall*Nmult
maghRec[filt] += maghRec0/Nall*Nmult
rhRec[filt] += rhRec0/Nall*Nmult
#for the mollweide
if (filt == 'all'):
Nrec = len(recCombined.index)
rF = Nrec/Nall
rN = Nrec/Nall*Nmult
raN = Nmult
obN = Nobs/Nall*Nmult
fiN = Nall
fioN = Nobs
firN = Nrec
NrecPrsa = len(prsaRecCombined.index)
NrecPrsa = NrecPrsa/Nall*Nmult
NobsPrsa = NobsPrsa/Nall*Nmult
NallPrsa = NallPrsa/Nall*Nmult
recFrac.append(rF)
recN.append(rN)
rawN.append(raN)
obsN.append(obN)
fileN.append(fiN)
fileObsN.append(fioN)
fileRecN.append(firN)
allNPrsa.append(NallPrsa)
obsNPrsa.append(NobsPrsa)
recNPrsa.append(NrecPrsa)
#print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
# Concatenating p and ecc lists
eccAll = np.concatenate(eccAll)
eccObs = np.concatenate(eccObs)
eccRec = np.concatenate(eccRec)
pAll = np.concatenate(pAll)
pObs = np.concatenate(pObs)
pRec = np.concatenate(pRec)
# print('Ecc lists:', eccAll, eccObs, eccRec)
# print('P lists:', pAll, pObs, pRec)
# Appending lists with all the p/ecc values to our dataframes
# All dataframe
peccAll['e'] = eccAll
peccAll['p'] = pAll
# Observable dataframe
peccObs['e'] = eccObs
peccObs['p'] = pObs
# Recovered dataframe
peccRec['e'] = eccRec
peccRec['p'] = pRec
# print('Final Dataframes:', peccAll, peccObs, peccRec)
# print(peccRec.columns)
# 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])
#plot and save the histograms
saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')
#make the mollweide
coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
RAwrap = coords.ra.wrap_at(180.*units.degree).degree
Decwrap = coords.dec.wrap_at(180.*units.degree).degree
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'% recovered')
f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'log10(N) recovered')
f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight')
if (doIndividualPlots):
fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')
fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight')
fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight')
flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')
fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight')
fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight')
frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight')
print("###################")
print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN)))
print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN)))
print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN)))
print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.)
print("###################")
print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN)))
print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN)))
print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN)))
print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.)
print("###################")
print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
| 35.284585 | 213 | 0.641089 |
be21dcede1ec1af84c0ccb9e8297bd042d23271a
| 1,712 |
py
|
Python
|
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 13 |
2015-11-30T15:49:45.000Z
|
2022-02-08T16:11:30.000Z
|
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 640 |
2015-02-11T18:55:47.000Z
|
2022-03-31T14:12:23.000Z
|
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 51 |
2015-08-11T21:01:40.000Z
|
2022-03-30T07:31:34.000Z
|
import FWCore.ParameterSet.Config as cms
import os
process = cms.Process("summary")
process.MessageLogger = cms.Service( "MessageLogger",
debugModules = cms.untracked.vstring( "*" ),
cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ),
destinations = cms.untracked.vstring( "cout" )
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi")
### 2018 Prompt
process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
process.BeamSpotRcdPrinter.startIOV = 1350646955507767
process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt"
### 2017 ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1275820035276801
#process.BeamSpotRcdPrinter.endIOV = 1316235677532161
### 2018 ABC ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1354018504835073
#process.BeamSpotRcdPrinter.endIOV = 1374668707594734
### 2018D Prompt
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
#process.BeamSpotRcdPrinter.startIOV = 1377280047710242
#process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.p = cms.Path(process.BeamSpotRcdPrinter)
| 38.044444 | 110 | 0.733645 |
be237e880ccb11dff8fac9488a75005cce1dd897
| 381 |
py
|
Python
|
django/authentication/api/urls.py
|
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
|
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
|
[
"MIT"
] | 2 |
2021-05-13T18:02:00.000Z
|
2022-03-30T19:53:38.000Z
|
django/authentication/api/urls.py
|
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
|
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
|
[
"MIT"
] | null | null | null |
django/authentication/api/urls.py
|
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
|
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
|
[
"MIT"
] | null | null | null |
from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
urlpatterns = [
path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('verify/', TokenVerifyView.as_view(), name='token_verify'),
]
| 29.307692 | 77 | 0.734908 |
be23b9cced5e521037b8711e7bde05f5d17925f0
| 7,257 |
py
|
Python
|
yue/core/explorer/ftpsource.py
|
nsetzer/YueMusicPlayer
|
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
|
[
"MIT"
] | null | null | null |
yue/core/explorer/ftpsource.py
|
nsetzer/YueMusicPlayer
|
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
|
[
"MIT"
] | null | null | null |
yue/core/explorer/ftpsource.py
|
nsetzer/YueMusicPlayer
|
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
|
[
"MIT"
] | 1 |
2019-03-06T14:29:27.000Z
|
2019-03-06T14:29:27.000Z
|
from ftplib import FTP,error_perm, all_errors
import posixpath
from io import BytesIO,SEEK_SET
from .source import DataSource
import sys
import re
reftp = re.compile(r'(ssh|ftp)://(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?/(.*)')
| 27.384906 | 83 | 0.539893 |
be23cbbbbbb53c2c62b109846cda81e757eb1b58
| 14,527 |
py
|
Python
|
tests/engine/knowledge_base.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 1,253 |
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
tests/engine/knowledge_base.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 3,388 |
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
tests/engine/knowledge_base.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 376 |
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
if __name__ == '__main__':
unittest.main()
| 37.153453 | 79 | 0.748537 |
be247dcc0b3afb4ed9e9527cdfcf9da7e14edb83
| 2,244 |
py
|
Python
|
Problems/Dynamic Programming/140. Word Break II.py
|
BYJRK/LeetCode-Solutions
|
008467e1717309066a519acb8623d2f84071b64a
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/140. Word Break II.py
|
BYJRK/LeetCode-Solutions
|
008467e1717309066a519acb8623d2f84071b64a
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/140. Word Break II.py
|
BYJRK/LeetCode-Solutions
|
008467e1717309066a519acb8623d2f84071b64a
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/word-break-ii/
from typing import List
s = Solution()
print(s.wordBreak_dfs('catsanddog', ["cat", "cats", "and", "sand", "dog"]))
print(s.wordBreak_dfs('pineapplepenapple', [
"apple", "pen", "applepen", "pine", "pineapple"]))
# text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# words = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa",
# "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
# print(s.wordBreak(text, words))
| 29.142857 | 162 | 0.483512 |
be260edf2b0780a31f443fdc8e024043c1398df0
| 30,595 |
py
|
Python
|
neutron/tests/unit/db/test_migration.py
|
banhr/neutron
|
4b3e73648327ce9f4d3437986a8663372f577f1b
|
[
"Apache-2.0"
] | 1 |
2018-07-04T07:59:31.000Z
|
2018-07-04T07:59:31.000Z
|
neutron/tests/unit/db/test_migration.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/db/test_migration.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | 1 |
2018-08-28T17:13:16.000Z
|
2018-08-28T17:13:16.000Z
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import sys
import textwrap
from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from neutron_lib.utils import helpers
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from testtools import matchers
from neutron.conf.db import migration_cli
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api
| 40.469577 | 79 | 0.610557 |
be26276b9a7545ff4607b3e77287b80155ccbf7d
| 959 |
py
|
Python
|
withdrawal/floor_ceiling.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 23 |
2016-09-07T06:13:37.000Z
|
2022-02-17T23:49:03.000Z
|
withdrawal/floor_ceiling.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | null | null | null |
withdrawal/floor_ceiling.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 12 |
2016-06-30T17:27:39.000Z
|
2021-12-12T07:54:27.000Z
|
from decimal import Decimal
from .abc import WithdrawalStrategy
# Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money
| 30.935484 | 91 | 0.693431 |
be2647506be1ffc3fcefa8eacc15a737776b73ab
| 8,288 |
py
|
Python
|
20190426/6_BME280_WiFi/bme280.py
|
rcolistete/MicroPython_MiniCurso_ProjOrientado
|
c82affe833587141c4c05ee08ea84b095bfe845f
|
[
"MIT"
] | null | null | null |
20190426/6_BME280_WiFi/bme280.py
|
rcolistete/MicroPython_MiniCurso_ProjOrientado
|
c82affe833587141c4c05ee08ea84b095bfe845f
|
[
"MIT"
] | null | null | null |
20190426/6_BME280_WiFi/bme280.py
|
rcolistete/MicroPython_MiniCurso_ProjOrientado
|
c82affe833587141c4c05ee08ea84b095bfe845f
|
[
"MIT"
] | null | null | null |
"""
MicroPython driver for Bosch BME280 temperature, pressure and humidity I2C sensor:
https://www.bosch-sensortec.com/bst/products/all_products/bme280
Authors: Nelio Goncalves Godoi, Roberto Colistete Jr
Version: 3.1.2 @ 2018/04
License: MIT License (https://opensource.org/licenses/MIT)
"""
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address
BME280_I2CADDR = 0x76
# BME280_I2CADDR = 0x77
OSAMPLE_0 = 0
OSAMPLE_1 = 1
OSAMPLE_2 = 2
OSAMPLE_4 = 3
OSAMPLE_8 = 4
OSAMPLE_16 = 5
BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
BME280_REGISTER_CONTROL_IIR = 0xF5
FILTER_OFF = 0
FILTER_2 = 1
FILTER_4 = 2
FILTER_8 = 3
FILTER_16 = 4
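# For reference (per the Bosch BME280 datasheet): the OSAMPLE_* values select the
# oversampling register settings (skipped, x1, x2, x4, x8, x16) and the FILTER_*
# values select the IIR filter coefficient (off, 2, 4, 8, 16).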
CELSIUS = 'C'
FAHRENHEIT = 'F'
KELVIN = 'K'
| 33.554656 | 81 | 0.558518 |
be2674ce54565aac0c872fd9c167bb04e3da2fda
| 9,749 |
py
|
Python
|
airflow/contrib/secrets/hashicorp_vault.py
|
colpal/airfloss
|
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/contrib/secrets/hashicorp_vault.py
|
colpal/airfloss
|
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 7 |
2020-10-05T18:20:16.000Z
|
2022-02-01T00:54:35.000Z
|
airflow/contrib/secrets/hashicorp_vault.py
|
colpal/airfloss
|
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 |
2020-10-21T03:22:43.000Z
|
2020-10-21T03:22:43.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Objects relating to sourcing connections & variables from Hashicorp Vault
"""
from typing import Optional
import hvac
from cached_property import cached_property
from hvac.exceptions import InvalidPath, VaultError
from airflow.exceptions import AirflowException
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
| 40.452282 | 102 | 0.647656 |
be27d0cf506bd514ef2b8fd412eba196789b1b66
| 6,347 |
py
|
Python
|
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
|
AdamCoscia/eve-trajectory-mining
|
134f142a5665f66fbf92aada8dd6252fab64ddff
|
[
"MIT"
] | null | null | null |
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
|
AdamCoscia/eve-trajectory-mining
|
134f142a5665f66fbf92aada8dd6252fab64ddff
|
[
"MIT"
] | null | null | null |
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
|
AdamCoscia/eve-trajectory-mining
|
134f142a5665f66fbf92aada8dd6252fab64ddff
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Computes distance between killmails by text similarity.
Edit Distance Metrics
- Levenshtein Distance
- Damerau-Levenshtein Distance
- Jaro Distance
- Jaro-Winkler Distance
- Match Rating Approach Comparison
- Hamming Distance
Vector Distance Metrics
- Jaccard Similarity
- Cosine Distance
Written By: Adam Coscia
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
from functools import reduce
import os
import sys
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def get_long_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of long text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
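# A minimal sketch with hypothetical item lists: identical bags of words score 1.0
# and disjoint ones score 0.0, e.g.
# get_long_text_cosine_distance([('plasma cannon', 'pc')], [('plasma cannon', 'pc')]) -> 1.0
# get_long_text_cosine_distance([('plasma cannon', 'pc')], [('shield booster', 'sb')]) -> 0.0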
def get_short_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of short text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical and 0
being complete different.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
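# Same computation as the long-text version above, but built from the short-text
# field (x[1]) of each item tuple instead of the long-text field (x[0]).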
# Load CSV from local file
lap("Loading CSV data from local file...")
df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8')
df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill'])
df = df.dropna()
# Convert items column to correct data type
lap("Converting 'item' column value types...")
df['items'] = df['items'].apply(literal_eval)
# Group DataFrame by character_id and compute distance series for each group
lap("Computing cosine distances and change in kd by grouping character_id's...")
groupby = df.groupby('character_id') # group dataframe by character_id
num_groups = len(groupby) # get number of groups
count = 0 # current group number out of number of groups
groups = [] # list to append modified group dataframes to
for name, gp in groupby:
# Order the observations and prepare the dataframe
gp = (gp.sort_values(by=['killmail_id'])
.reset_index()
.drop('index', axis=1))
# Generate change in kills over change in deaths and change in kd ratio
kills1 = gp['k_count']
kills2 = gp['k_count'].shift()
deaths1 = gp['d_count']
deaths2 = gp['d_count'].shift()
idx = len(gp.columns)
gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1))
gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift())
# Generate pairs of observations sequentially to compare
pairs = []
items1 = gp['items']
items2 = gp['items'].shift()
for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair
los1 = items1.iloc[i]
los2 = items2.iloc[i]
pairs.append((los2, los1))
# Generate distance series using pairs list and different metrics
# start distance series with nan due to starting range at 1
cos_dist_lt = [np.nan] # cosine distance b/w long text BoW
cos_dist_st = [np.nan] # cosine distance b/w short text BoW
for pair in pairs:
cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1]))
cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1]))
idx = len(gp.columns)
gp.insert(idx, 'cos_dist_lt', cos_dist_lt)
gp.insert(idx, 'cos_dist_st', cos_dist_st)
groups.append(gp)
# Record progress
count += 1
print(f"Progress {count/num_groups:2.1%}", end="\r")
lap("Concatenating resulting groups and writing to file...")
df_res = pd.concat(groups)
df_res.to_csv(f'data/useable_victims_distancesAndKD.csv')
lap("Exit")
| 37.556213 | 92 | 0.669293 |
be28146fdfcf8ed2a16239294869650841f46a74
| 1,181 |
py
|
Python
|
src/chess/utils.py
|
Dalkio/custom-alphazero
|
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
|
[
"MIT"
] | null | null | null |
src/chess/utils.py
|
Dalkio/custom-alphazero
|
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
|
[
"MIT"
] | 6 |
2020-08-13T13:02:58.000Z
|
2022-02-10T02:21:49.000Z
|
src/chess/utils.py
|
Dalkio/custom-alphazero
|
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
|
[
"MIT"
] | null | null | null |
import numpy as np
from itertools import product
from typing import List
from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move
| 36.90625 | 99 | 0.686706 |
be2868ed0261dc37f256c2a99990b52d127544a4
| 1,845 |
py
|
Python
|
multirotor.py
|
christymarc/mfac
|
29449a0c79e618059fa6f67ae7ab76711543c513
|
[
"MIT"
] | null | null | null |
multirotor.py
|
christymarc/mfac
|
29449a0c79e618059fa6f67ae7ab76711543c513
|
[
"MIT"
] | null | null | null |
multirotor.py
|
christymarc/mfac
|
29449a0c79e618059fa6f67ae7ab76711543c513
|
[
"MIT"
] | 1 |
2022-03-01T05:00:02.000Z
|
2022-03-01T05:00:02.000Z
|
from random import gauss
| 27.132353 | 86 | 0.566938 |
be286e006cd7ef8775677a3d599b4cc9bc55f723
| 6,329 |
py
|
Python
|
stpmex/client.py
|
cuenca-mx/stpmex-python
|
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
|
[
"MIT"
] | 37 |
2019-01-06T02:52:38.000Z
|
2022-03-17T21:19:48.000Z
|
stpmex/client.py
|
cuenca-mx/stpmex-python
|
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
|
[
"MIT"
] | 204 |
2018-09-05T22:55:33.000Z
|
2022-03-31T23:21:13.000Z
|
stpmex/client.py
|
cuenca-mx/stpmex-python
|
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
|
[
"MIT"
] | 20 |
2018-09-17T15:29:51.000Z
|
2022-02-03T06:29:32.000Z
|
import re
from typing import Any, ClassVar, Dict, List, NoReturn, Union
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from requests import Response, Session
from .exc import (
AccountDoesNotExist,
BankCodeClabeMismatch,
ClaveRastreoAlreadyInUse,
DuplicatedAccount,
InvalidAccountType,
InvalidAmount,
InvalidField,
InvalidInstitution,
InvalidPassphrase,
InvalidRfcOrCurp,
InvalidTrackingKey,
MandatoryField,
NoOrdenesEncontradas,
NoServiceResponse,
PldRejected,
SameAccount,
SignatureValidationError,
StpmexException,
)
from .resources import CuentaFisica, Orden, Resource, Saldo
from .version import __version__ as client_version
DEMO_HOST = 'https://demo.stpmex.com:7024'
PROD_HOST = 'https://prod.stpmex.com'
| 34.026882 | 78 | 0.618739 |
be288cac85f4b858cc1c87f0fce298bec6844670
| 4,770 |
py
|
Python
|
aql/tests/types/aql_test_list_types.py
|
menify/sandbox
|
32166c71044f0d5b414335b2b6559adc571f568c
|
[
"MIT"
] | null | null | null |
aql/tests/types/aql_test_list_types.py
|
menify/sandbox
|
32166c71044f0d5b414335b2b6559adc571f568c
|
[
"MIT"
] | null | null | null |
aql/tests/types/aql_test_list_types.py
|
menify/sandbox
|
32166c71044f0d5b414335b2b6559adc571f568c
|
[
"MIT"
] | null | null | null |
import sys
import os.path
import timeit
sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))
from aql_tests import skip, AqlTestCase, runLocalTests
from aql.util_types import UniqueList, SplitListType, List, ValueListType
#//===========================================================================//
#//===========================================================================//
if __name__ == "__main__":
runLocalTests()
| 27.413793 | 88 | 0.469182 |
be2a32ef4dd37c381a36c7a58f2812962caeb4d5
| 502 |
py
|
Python
|
logger_application/logger.py
|
swatishayna/OnlineEDAAutomation
|
a1bfe8b1dee51a4872529a98f6e1136922329e3e
|
[
"MIT"
] | 1 |
2022-03-24T20:26:44.000Z
|
2022-03-24T20:26:44.000Z
|
logger_application/logger.py
|
surajaiswal13/OnlineEDAAutomation
|
a1bfe8b1dee51a4872529a98f6e1136922329e3e
|
[
"MIT"
] | null | null | null |
logger_application/logger.py
|
surajaiswal13/OnlineEDAAutomation
|
a1bfe8b1dee51a4872529a98f6e1136922329e3e
|
[
"MIT"
] | 2 |
2022-02-08T16:35:32.000Z
|
2022-03-04T06:56:54.000Z
|
from datetime import datetime
from src.utils import uploaded_file
import os
| 27.888889 | 143 | 0.621514 |
be2a7a241325332e4117c63de7ba8c5d1c491871
| 332 |
py
|
Python
|
metasync/params.py
|
dstarikov/metavault
|
1933cc6cd828ee9c594a45a78238a9a319de0143
|
[
"MIT"
] | 1 |
2019-05-28T15:59:35.000Z
|
2019-05-28T15:59:35.000Z
|
metasync/params.py
|
dstarikov/metavault
|
1933cc6cd828ee9c594a45a78238a9a319de0143
|
[
"MIT"
] | null | null | null |
metasync/params.py
|
dstarikov/metavault
|
1933cc6cd828ee9c594a45a78238a9a319de0143
|
[
"MIT"
] | null | null | null |
# config params
KB = 1024
MB = 1024*KB
GB = 1024*MB
# name of meta root dir
META_DIR = ".metasync"
# batching time for daemon
SYNC_WAIT = 3
# blob size
BLOB_UNIT = 32*MB
# Increase of Paxos proposal number
PAXOS_PNUM_INC = 10
# authentication directory
import os
AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
| 15.090909 | 61 | 0.713855 |
be2be4ab8f891e1d119f0c6cbe7bc4c566727644
| 547 |
py
|
Python
|
py/tests/test_valid_parentheses.py
|
Dragonway/LeetCode
|
53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135
|
[
"MIT"
] | null | null | null |
py/tests/test_valid_parentheses.py
|
Dragonway/LeetCode
|
53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135
|
[
"MIT"
] | null | null | null |
py/tests/test_valid_parentheses.py
|
Dragonway/LeetCode
|
53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135
|
[
"MIT"
] | null | null | null |
import unittest
from py.tests.utils import test
from py import valid_parentheses as vp
| 30.388889 | 46 | 0.521024 |
be2c413f1972d5571cb52206e64c8dffe9762a99
| 2,503 |
py
|
Python
|
hitnet/hitnet.py
|
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
|
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
|
[
"MIT"
] | 38 |
2021-09-05T13:59:11.000Z
|
2022-03-28T14:18:30.000Z
|
hitnet/hitnet.py
|
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
|
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
|
[
"MIT"
] | 3 |
2021-11-25T08:21:01.000Z
|
2022-03-07T08:22:11.000Z
|
hitnet/hitnet.py
|
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
|
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
|
[
"MIT"
] | 5 |
2021-09-05T23:15:10.000Z
|
2022-02-10T08:32:00.000Z
|
import tensorflow as tf
import numpy as np
import time
import cv2
from hitnet.utils_hitnet import *
drivingStereo_config = CameraConfig(0.546, 1000)
| 25.804124 | 96 | 0.742709 |
be2cf6688bc9f36adc898b8d1394b2bd6f967ed1
| 854 |
py
|
Python
|
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py
|
datamade/just-spaces
|
cc2b7d1518e5da65a403413d39a309fa3e2ac122
|
[
"MIT"
] | 6 |
2019-04-09T06:52:31.000Z
|
2021-08-31T04:31:59.000Z
|
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py
|
datamade/just-spaces
|
cc2b7d1518e5da65a403413d39a309fa3e2ac122
|
[
"MIT"
] | 176 |
2019-01-11T21:05:50.000Z
|
2021-03-16T17:04:13.000Z
|
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py
|
datamade/just-spaces
|
cc2b7d1518e5da65a403413d39a309fa3e2ac122
|
[
"MIT"
] | 1 |
2019-05-10T15:30:25.000Z
|
2019-05-10T15:30:25.000Z
|
from django import forms
from fobi.base import FormFieldPlugin, form_element_plugin_registry
from .forms import HouseholdTenureForm
form_element_plugin_registry.register(HouseholdTenurePlugin)
| 29.448276 | 70 | 0.686183 |
076c3b7d76dce4361980237fd24f6e7d24b9f302
| 368 |
py
|
Python
|
utils/scripts/OOOlevelGen/src/sprites/__init__.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/sprites/__init__.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/sprites/__init__.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
__all__ = ['EnemyBucketWithStar',
'Nut',
'Beam',
'Enemy',
'Friend',
'Hero',
'Launcher',
'Rotor',
'SpikeyBuddy',
'Star',
'Wizard',
'EnemyEquipedRotor',
'CyclingEnemyObject',
'Joints',
'Bomb',
'Contacts']
| 21.647059 | 33 | 0.366848 |
076ca6ec3c064417c645687635c5d40cf01c07b7
| 29,159 |
py
|
Python
|
code/trainer.py
|
mazzaAnt/StackGAN-v2
|
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
|
[
"MIT"
] | 1 |
2019-02-04T20:45:51.000Z
|
2019-02-04T20:45:51.000Z
|
code/trainer.py
|
mazzaAnt/StackGAN-v2
|
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
|
[
"MIT"
] | null | null | null |
code/trainer.py
|
mazzaAnt/StackGAN-v2
|
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from six.moves import range
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
import numpy as np
import os
import time
from PIL import Image, ImageFont, ImageDraw
from copy import deepcopy
from miscc.config import cfg
from miscc.utils import mkdir_p
from CaptionDatasets import *
from tensorboard import summary
from tensorboard import FileWriter
from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3
# ################## Shared functions ###################
# ################# Text to image task############################ #
| 41.360284 | 116 | 0.540245 |
076cc2a993643184f8804f5d69cb1769c80c9cee
| 5,654 |
py
|
Python
|
spletni_vmesnik.py
|
LeaHolc/recepcija
|
bff9f804e795e45c2da214432042c0ae067783b0
|
[
"MIT"
] | 1 |
2021-11-11T08:20:13.000Z
|
2021-11-11T08:20:13.000Z
|
spletni_vmesnik.py
|
LeaHolc/recepcija
|
bff9f804e795e45c2da214432042c0ae067783b0
|
[
"MIT"
] | null | null | null |
spletni_vmesnik.py
|
LeaHolc/recepcija
|
bff9f804e795e45c2da214432042c0ae067783b0
|
[
"MIT"
] | null | null | null |
from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file
import bottle
import controller
from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna
import datetime as dt
bottle.run(reloader=True, debug=True)
| 37.693333 | 196 | 0.703926 |
076da057376eccf60a978162dbf694687eba8ff6
| 1,233 |
py
|
Python
|
espnet/nets/pytorch_backend/transducer/initializer.py
|
magictron/espnet
|
075cee8d586957241be3e54c47846fbb12a32310
|
[
"Apache-2.0"
] | 2 |
2020-06-21T11:15:10.000Z
|
2021-12-03T08:08:45.000Z
|
espnet/nets/pytorch_backend/transducer/initializer.py
|
magictron/espnet
|
075cee8d586957241be3e54c47846fbb12a32310
|
[
"Apache-2.0"
] | 1 |
2021-03-05T10:43:49.000Z
|
2021-03-05T10:43:49.000Z
|
espnet/nets/pytorch_backend/transducer/initializer.py
|
magictron/espnet
|
075cee8d586957241be3e54c47846fbb12a32310
|
[
"Apache-2.0"
] | 2 |
2021-03-30T06:02:08.000Z
|
2021-08-06T06:59:22.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parameter initialization for transducer RNN/Transformer parts."""
import six
from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters
from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one
from espnet.nets.pytorch_backend.transformer.initializer import initialize
def initializer(model, args):
"""Initialize transducer model.
Args:
model (torch.nn.Module): transducer instance
args (Namespace): argument Namespace containing options
"""
if args.dtype != "transformer":
if args.etype == "transformer":
initialize(model.encoder, args.transformer_init)
lecun_normal_init_parameters(model.dec)
else:
lecun_normal_init_parameters(model)
model.dec.embed.weight.data.normal_(0, 1)
for l in six.moves.range(len(model.dec.decoder)):
set_forget_bias_to_one(model.dec.decoder[l].bias_ih)
else:
if args.etype == "transformer":
initialize(model, args.transformer_init)
else:
lecun_normal_init_parameters(model.encoder)
initialize(model.decoder, args.transformer_init)
| 31.615385 | 83 | 0.697486 |
076e350bd997dc6e64e333caef566c1b62991f65
| 970 |
py
|
Python
|
evaluate.py
|
adelmassimo/EM-Algorithm-for-MMPP
|
23ae031076a464bfba5286cf6b5a1fa5e1cc66b1
|
[
"MIT"
] | null | null | null |
evaluate.py
|
adelmassimo/EM-Algorithm-for-MMPP
|
23ae031076a464bfba5286cf6b5a1fa5e1cc66b1
|
[
"MIT"
] | null | null | null |
evaluate.py
|
adelmassimo/EM-Algorithm-for-MMPP
|
23ae031076a464bfba5286cf6b5a1fa5e1cc66b1
|
[
"MIT"
] | null | null | null |
import model
import numpy as np
import datasetReader as df
import main
# Number of traces loaded T
T = 1
# Generate traces
traces_factory = df.DatasetFactory()
traces_factory.createDataset(T)
traces = traces_factory.traces
P0 = np.matrix("[ .02 0 0;"  # third entry of this row is an assumed 0 so every row has three columns
"0 0 0.5;"
"0 0 0]")
P1 = np.matrix("[0.1 0 0;"
"0 0.5 0;"
"0 0 0.9]")
M = np.matrix("[0.25 0 0;"
"0 0.23 0;"
"0 0 0.85]")
| 23.095238 | 74 | 0.541237 |
076ea8e320bea4958c4967806ffb3361e0b72568
| 2,400 |
py
|
Python
|
Imaging/Core/Testing/Python/TestHSVToRGB.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 1,755 |
2015-01-03T06:55:00.000Z
|
2022-03-29T05:23:26.000Z
|
Imaging/Core/Testing/Python/TestHSVToRGB.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 29 |
2015-04-23T20:58:30.000Z
|
2022-03-02T16:16:42.000Z
|
Imaging/Core/Testing/Python/TestHSVToRGB.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 1,044 |
2015-01-05T22:48:27.000Z
|
2022-03-31T02:38:26.000Z
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.SetSize(320,320)
viewer.Render()
# --- end of script --
| 34.285714 | 67 | 0.814583 |
076eec8de4f676b9d586492c7ab7750df189a96a
| 296 |
py
|
Python
|
kelas_2b/echa.py
|
barizraihan/belajarpython
|
57df4c939600dd34a519599d6c78178bfb55063b
|
[
"MIT"
] | null | null | null |
kelas_2b/echa.py
|
barizraihan/belajarpython
|
57df4c939600dd34a519599d6c78178bfb55063b
|
[
"MIT"
] | null | null | null |
kelas_2b/echa.py
|
barizraihan/belajarpython
|
57df4c939600dd34a519599d6c78178bfb55063b
|
[
"MIT"
] | null | null | null |
import csv
| 32.888889 | 93 | 0.567568 |
076f84eca9f11a3725b25d5cf7a8fa60fb6dd720
| 3,399 |
py
|
Python
|
tests/test_handler_surface_distance.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 2,971 |
2019-10-16T23:53:16.000Z
|
2022-03-31T20:58:24.000Z
|
tests/test_handler_surface_distance.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 2,851 |
2020-01-10T16:23:44.000Z
|
2022-03-31T22:14:53.000Z
|
tests/test_handler_surface_distance.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 614 |
2020-01-14T19:18:01.000Z
|
2022-03-31T14:06:14.000Z
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
"""
Return a 3D image with a sphere inside. Voxel values will be
1 inside the sphere, and 0 elsewhere.
Args:
radius: radius of sphere (in terms of number of voxels, can be partial)
centre: location of sphere centre.
im_shape: shape of image to create
See also:
:py:meth:`~create_test_image_3d`
"""
# Create image
image = np.zeros(im_shape, dtype=np.int32)
spy, spx, spz = np.ogrid[
-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
]
circle = (spx * spx + spy * spy + spz * spz) <= radius * radius
image[circle] = 1
image[~circle] = 0
return image
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
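# these samples cover a shifted prediction vs. ground truth, identical masks, an empty prediction, and two empty masks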
if __name__ == "__main__":
unittest.main()
| 35.778947 | 120 | 0.692262 |
077018ad315b121efadde62952dbcb47369a343a
| 2,368 |
py
|
Python
|
benchmarks/eval.py
|
rom1mouret/anoflows
|
42381c06b8897e4510e73cda87ea97ea3f4a5579
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/eval.py
|
rom1mouret/anoflows
|
42381c06b8897e4510e73cda87ea97ea3f4a5579
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/eval.py
|
rom1mouret/anoflows
|
42381c06b8897e4510e73cda87ea97ea3f4a5579
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import logging
import yaml
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.impute import SimpleImputer
from anoflows.hpo import find_best_flows
from data_loading import load_data
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) == 1:
logging.error("YAML data specification missing from the command line arguments")
exit(1)
spec_file = sys.argv[1]
df, spec = load_data(spec_file)
max_rows = min(len(df), spec.get("max_rows", 40000))
novelty_detection = spec.get("novelty", True)
normal_classes = spec["normal_classes"]
precision = defaultdict(list)
for rounds in range(spec.get("rounds", 1)):
# random sampling
df = df.sample(n=max_rows, replace=False)
label_col = spec["label_column"]
y = df[label_col].values
other = df.drop(label_col, inplace=False, axis=1)
X = other.values
# imputing
X = SimpleImputer(copy=False).fit_transform(X)
# train/test split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, shuffle=False, test_size=0.5)
if novelty_detection:
keep = np.where(np.isin(y_train, normal_classes))[0]
X_train = X_train[keep, :]
y_train = y_train[keep]
# training
#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)
from anoflows.anoflow_bagging import AnoFlowBagging
flows = AnoFlowBagging()
flows.fit(X_train)
iforest = IsolationForest().fit(X_train)
# prediction
pred = {
"anoflows": flows.likelihood(X_test),
"iforest": iforest.decision_function(X_test)
}
# evaluation
y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0]
ref = np.zeros(len(y_test))
ref[y_true] = 1
k = len(y_true)
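    # precision@k: the k lowest-scoring points (most anomalous under either model) are flagged and compared against the true anomalies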
for name, y_pred in pred.items():
anomaly_indices = y_pred.argsort()[:k]
prec = ref[anomaly_indices].sum() / k
logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test)))
precision[name].append(prec)
logging.info("* SUMMARY %s", spec_file)
for name, prec in precision.items():
prec = 100 * np.array(prec)
mean = np.mean(prec)
std = np.std(prec)
logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
| 29.234568 | 94 | 0.678209 |
07702a9eb4e9374ca232b483bdbecbfbdb1917c5
| 840 |
py
|
Python
|
pydantic/version.py
|
jamescurtin/pydantic
|
4f8f9396906a094626b770fb7cc8eecf03770ffe
|
[
"MIT"
] | 1 |
2020-02-25T15:28:47.000Z
|
2020-02-25T15:28:47.000Z
|
pydantic/version.py
|
jamescurtin/pydantic
|
4f8f9396906a094626b770fb7cc8eecf03770ffe
|
[
"MIT"
] | 1 |
2020-01-17T17:12:45.000Z
|
2020-01-17T17:12:45.000Z
|
pydantic/version.py
|
jamescurtin/pydantic
|
4f8f9396906a094626b770fb7cc8eecf03770ffe
|
[
"MIT"
] | 1 |
2020-12-19T18:00:19.000Z
|
2020-12-19T18:00:19.000Z
|
__all__ = ['VERSION', 'version_info']
VERSION = '1.4a1'
| 27.096774 | 101 | 0.589286 |
0770f2a922548842dd4151e55d3fc69c6cf5b84c
| 2,319 |
py
|
Python
|
spire/core/registry.py
|
siq/spire
|
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
|
[
"Linux-OpenIB"
] | null | null | null |
spire/core/registry.py
|
siq/spire
|
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
|
[
"Linux-OpenIB"
] | 1 |
2016-09-15T16:19:27.000Z
|
2016-09-15T16:20:06.000Z
|
spire/core/registry.py
|
siq/spire
|
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
|
[
"Linux-OpenIB"
] | null | null | null |
from scheme import Structure
__all__ = ('Configurable', 'Registry')
| 34.61194 | 93 | 0.581285 |
07710c963c7c958684d4d5e192f36678ee929e23
| 231 |
py
|
Python
|
oslo_devsupport/model/__init__.py
|
berrange/oslo.devsupport
|
463c5842e95c5f8a7009ab1041f290e3a1050a06
|
[
"Apache-1.1"
] | null | null | null |
oslo_devsupport/model/__init__.py
|
berrange/oslo.devsupport
|
463c5842e95c5f8a7009ab1041f290e3a1050a06
|
[
"Apache-1.1"
] | null | null | null |
oslo_devsupport/model/__init__.py
|
berrange/oslo.devsupport
|
463c5842e95c5f8a7009ab1041f290e3a1050a06
|
[
"Apache-1.1"
] | null | null | null |
from .command import *
from .database import *
from .entrypoint import *
from .group import *
from .http import *
from .messaging import *
from .method import *
from .operation import *
from .stack import *
from .threads import *
| 19.25 | 25 | 0.735931 |
0771ae571980aa4669298ae5f48b1ac83a19af96
| 2,953 |
py
|
Python
|
scripts/extract.py
|
nng555/fairseq
|
c9730a125825a85f33042e1b9fd1959b8ca829e5
|
[
"MIT"
] | 2 |
2020-10-05T08:52:01.000Z
|
2021-03-03T15:26:35.000Z
|
scripts/extract.py
|
nng555/fairseq
|
c9730a125825a85f33042e1b9fd1959b8ca829e5
|
[
"MIT"
] | null | null | null |
scripts/extract.py
|
nng555/fairseq
|
c9730a125825a85f33042e1b9fd1959b8ca829e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
Main(args)
| 31.752688 | 88 | 0.529292 |
0773947b769d5f943efc051b2beaf2ee562da724
| 1,231 |
py
|
Python
|
AppImageBuilder/commands/file.py
|
gouchi/appimage-builder
|
40e9851c573179e066af116fb906e9cad8099b59
|
[
"MIT"
] | null | null | null |
AppImageBuilder/commands/file.py
|
gouchi/appimage-builder
|
40e9851c573179e066af116fb906e9cad8099b59
|
[
"MIT"
] | null | null | null |
AppImageBuilder/commands/file.py
|
gouchi/appimage-builder
|
40e9851c573179e066af116fb906e9cad8099b59
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
from .command import Command
| 31.564103 | 80 | 0.685621 |
0775eae440b3ed8a8de73f26dfbbc57343a6323d
| 6,670 |
py
|
Python
|
text_selection/analyse_zenon_scrape.py
|
dainst/chronoi-corpus-processing
|
7f508a7572e1022c4c88d1477db029e6619a1f0c
|
[
"MIT"
] | null | null | null |
text_selection/analyse_zenon_scrape.py
|
dainst/chronoi-corpus-processing
|
7f508a7572e1022c4c88d1477db029e6619a1f0c
|
[
"MIT"
] | null | null | null |
text_selection/analyse_zenon_scrape.py
|
dainst/chronoi-corpus-processing
|
7f508a7572e1022c4c88d1477db029e6619a1f0c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import furl
import json
import re
import sys
from collections import defaultdict
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Process a file with zenon json records and print some information about them.")
parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.")
parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.")
# these are arguments to print some specific information
parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.")
parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.")
parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.")
# these are meant to work together select by a url pattern then print information about the records
parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.")
parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)")
parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.")
parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids")
parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages")
main(parser.parse_args())
| 40.670732 | 192 | 0.669715 |
07773d417997f41786d66f2eb9103478a102aad8
| 2,578 |
py
|
Python
|
src/python/twitter/pants/targets/java_antlr_library.py
|
wfarner/commons
|
42988a7a49f012665174538cca53604c7846ee86
|
[
"Apache-2.0"
] | 1 |
2019-12-20T14:13:27.000Z
|
2019-12-20T14:13:27.000Z
|
src/python/twitter/pants/targets/java_antlr_library.py
|
wfarner/commons
|
42988a7a49f012665174538cca53604c7846ee86
|
[
"Apache-2.0"
] | null | null | null |
src/python/twitter/pants/targets/java_antlr_library.py
|
wfarner/commons
|
42988a7a49f012665174538cca53604c7846ee86
|
[
"Apache-2.0"
] | 1 |
2019-12-20T14:13:29.000Z
|
2019-12-20T14:13:29.000Z
|
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'Brian Larson'
from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary
| 44.448276 | 100 | 0.588053 |
0777dbaeb86425a933c2accd81e0d8dadd226bab
| 3,092 |
py
|
Python
|
bigml/tests/create_pca_steps_bck.py
|
devs-cloud/python_ml
|
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
|
[
"Apache-2.0"
] | null | null | null |
bigml/tests/create_pca_steps_bck.py
|
devs-cloud/python_ml
|
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
|
[
"Apache-2.0"
] | null | null | null |
bigml/tests/create_pca_steps_bck.py
|
devs-cloud/python_ml
|
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from nose.tools import eq_, assert_less
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_pca_steps import i_get_the_pca
#@step(r'the pca name is "(.*)"')
#@step(r'I create a PCA from a dataset$')
#@step(r'I create a PCA from a dataset$')
#@step(r'I update the PCA name to "(.*)"$')
#@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)')
#@step(r'I wait until the PCA is ready less than (\d+)')
| 32.893617 | 83 | 0.698254 |
077860d7dfef7192b10ddd84d4a9115cb45934f6
| 290 |
py
|
Python
|
config.py
|
Pasmikh/quiz_please_bot
|
2b619b359d8021be57b404525013c53403d6cde1
|
[
"MIT"
] | null | null | null |
config.py
|
Pasmikh/quiz_please_bot
|
2b619b359d8021be57b404525013c53403d6cde1
|
[
"MIT"
] | null | null | null |
config.py
|
Pasmikh/quiz_please_bot
|
2b619b359d8021be57b404525013c53403d6cde1
|
[
"MIT"
] | null | null | null |
days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday']
operation = ''
options = ['Info', 'Check-in/Out', 'Edit games', 'Back']
admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname']
avail_days = []
TOKEN = 'bot_token'
group_id = id_of_group_chat
| 41.428571 | 88 | 0.713793 |
0778705078ff1aa67fe1ad3d2a88bc9581c13e09
| 2,331 |
py
|
Python
|
Chapter 8/sandwich-maker.py
|
ostin-r/automate-boring-stuff-solutions
|
78f0a2981e6520ff2907285e666168a0f35eba02
|
[
"FTL"
] | 4 |
2021-06-14T10:37:58.000Z
|
2021-12-30T17:49:17.000Z
|
Chapter 8/sandwich-maker.py
|
ostin-r/automate-boring-stuff-solutions
|
78f0a2981e6520ff2907285e666168a0f35eba02
|
[
"FTL"
] | null | null | null |
Chapter 8/sandwich-maker.py
|
ostin-r/automate-boring-stuff-solutions
|
78f0a2981e6520ff2907285e666168a0f35eba02
|
[
"FTL"
] | 1 |
2021-07-29T15:26:54.000Z
|
2021-07-29T15:26:54.000Z
|
'''
Austin Richards 2/20/21
sandwich-maker.py uses pyinputplus to validate user input for sandwich preferences
'''
import pyinputplus as ip
def get_cost(food_name):
'''gets the cost of items in sandwich_builder'''
food_dict = {
'sourdough':1.75,
'rye':2.0,
'wheat':1.50,
'white':1.25,
'chicken':2.0,
'turkey':1.50,
'ham':2.0,
'tofu':1.25,
'cheddar':2.0,
'swiss':2.5,
'mozzarella':2.5,
'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25
'no':0 # saying no to a topping costs nothing
}
return food_dict[food_name]
sandwich_builder()
| 33.3 | 96 | 0.62248 |
0778aa1b06b2fda0447a13db0a273ce1b3e6b40f
| 2,021 |
py
|
Python
|
tests/core/test_headerupdater.py
|
My-Novel-Management/storybuilderunite
|
c003d3451e237f574c54a87ea7d4fd8da8e833be
|
[
"MIT"
] | 1 |
2020-06-18T01:38:55.000Z
|
2020-06-18T01:38:55.000Z
|
tests/core/test_headerupdater.py
|
My-Novel-Management/storybuilder
|
1f36e56a74dbb55a25d60fce3ce81f3c650f521a
|
[
"MIT"
] | 143 |
2019-11-13T00:21:11.000Z
|
2020-08-15T05:47:41.000Z
|
tests/core/test_headerupdater.py
|
My-Novel-Management/storybuilderunite
|
c003d3451e237f574c54a87ea7d4fd8da8e833be
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
HeaderUpdater class test
========================
'''
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.commands.scode import SCode, SCmd
from builder.containers.chapter import Chapter
from builder.containers.episode import Episode
from builder.containers.scene import Scene
from builder.containers.story import Story
from builder.core import headerupdater as hd
| 33.131148 | 66 | 0.597724 |
0778ae783c1f5257a96e5e0972a23c96938e6782
| 682 |
py
|
Python
|
dotsDB/test_vlen_datasets.py
|
aernesto/Lab_DotsDB_Utilities
|
d8458b4126d80daeb5084234889fc6674158ea0f
|
[
"MIT"
] | 1 |
2019-03-11T19:12:12.000Z
|
2019-03-11T19:12:12.000Z
|
dotsDB/test_vlen_datasets.py
|
aernesto/Lab_DotsDB_Utilities
|
d8458b4126d80daeb5084234889fc6674158ea0f
|
[
"MIT"
] | null | null | null |
dotsDB/test_vlen_datasets.py
|
aernesto/Lab_DotsDB_Utilities
|
d8458b4126d80daeb5084234889fc6674158ea0f
|
[
"MIT"
] | 1 |
2019-10-31T20:10:12.000Z
|
2019-10-31T20:10:12.000Z
|
import numpy as np
import h5py
filename = "test_vlen_datasets_np_bool.h5"
rows = [np.array([np.True_, np.False_]),
np.array([np.True_, np.True_, np.False_])]
f = h5py.File(filename, 'x') # create file, fails if exists
vlen_data_type = h5py.special_dtype(vlen=np.bool_)
dset = f.create_dataset("vlen_matrix", (2,),
compression="gzip",
compression_opts=9,
fletcher32=True,
dtype=vlen_data_type)
for r in range(len(rows)):
dset[r] = rows[r]
f.flush()
f.close()
f = h5py.File(filename, 'r')
dsetr = f["vlen_matrix"]
for r in range(dsetr.shape[0]):
print(dsetr[r])
| 22.733333 | 60 | 0.590909 |
0779ab4524c7785b80eb2c94fee42447c65c7dbc
| 8,824 |
py
|
Python
|
utils.py
|
g4idrijs/CardiacUltrasoundPhaseEstimation
|
6bd2e157240133b6e306a7ca931d3d3b96647b88
|
[
"Apache-2.0"
] | 1 |
2020-11-17T16:14:06.000Z
|
2020-11-17T16:14:06.000Z
|
utils.py
|
g4idrijs/CardiacUltrasoundPhaseEstimation
|
6bd2e157240133b6e306a7ca931d3d3b96647b88
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
g4idrijs/CardiacUltrasoundPhaseEstimation
|
6bd2e157240133b6e306a7ca931d3d3b96647b88
|
[
"Apache-2.0"
] | 1 |
2020-06-28T09:19:02.000Z
|
2020-06-28T09:19:02.000Z
|
import os, time
import numpy as np
import scipy.signal
import scipy.misc
import scipy.ndimage.filters
import matplotlib.pyplot as plt
import PIL
from PIL import ImageDraw
import angles
import cv2
import SimpleITK as sitk
def fig2data(fig):
"""
    @brief Convert a Matplotlib figure to a 3D numpy array with
RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
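    # (np.fromstring is deprecated for binary input in newer NumPy; np.frombuffer is the drop-in equivalent)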
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf.shape = (h, w, 4)  # the ARGB buffer is row-major: height rows of width pixels
# canvas.tostring_argb give pixmap in ARGB mode.
# Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
| 24.241758 | 79 | 0.592248 |
077a977fb0ed578109f21b4a8ba0c330e1e23efb
| 1,441 |
py
|
Python
|
weasyl/emailer.py
|
akash143143/weasyl
|
be42a2313e657e97c4a48432379e37b6a3d4a4af
|
[
"Apache-2.0"
] | null | null | null |
weasyl/emailer.py
|
akash143143/weasyl
|
be42a2313e657e97c4a48432379e37b6a3d4a4af
|
[
"Apache-2.0"
] | null | null | null |
weasyl/emailer.py
|
akash143143/weasyl
|
be42a2313e657e97c4a48432379e37b6a3d4a4af
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import re
from email.mime.text import MIMEText
from smtplib import SMTP
from weasyl import define, macro
EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z")
def normalize_address(address):
"""
Converts an e-mail address to a consistent representation.
Returns None if the given address is not considered valid.
"""
address = address.strip()
if not EMAIL_ADDRESS.match(address):
return None
local, domain = address.split("@", 1)
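    # e.g. " User@Example.COM " -> "User@example.com": whitespace stripped, domain lowercased, local part preserved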
return "%s@%s" % (local, domain.lower())
def send(mailto, subject, content):
"""Send an e-mail.
`mailto` must be a normalized e-mail address to send this e-mail to. The
system email will be designated as the sender.
"""
message = MIMEText(content.strip())
message["To"] = mailto
message["From"] = macro.MACRO_EMAIL_ADDRESS
message["Subject"] = subject
# smtp.sendmail() only converts CR and LF (produced by MIMEText and our templates) to CRLF in Python 3. In Python 2, we need this:
msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string())
smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp'))
try:
smtp.sendmail(
from_addr=macro.MACRO_EMAIL_ADDRESS,
to_addrs=[mailto],
msg=msg_crlf,
)
finally:
smtp.quit()
define.metric('increment', 'emails')
| 26.685185 | 134 | 0.646079 |
077ab159d3a90c5c7c3094919ba408b1a2cadaa4
| 663 |
py
|
Python
|
tests/test_missing_process.py
|
ricklupton/sphinx_probs_rdf
|
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
|
[
"MIT"
] | 1 |
2021-07-31T10:06:50.000Z
|
2021-07-31T10:06:50.000Z
|
tests/test_missing_process.py
|
ricklupton/sphinx_probs_rdf
|
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
|
[
"MIT"
] | 1 |
2021-05-05T18:15:48.000Z
|
2021-05-05T18:15:48.000Z
|
tests/test_missing_process.py
|
ricklupton/sphinx_probs_rdf
|
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
|
[
"MIT"
] | null | null | null |
import pytest
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF, RDFS
from sphinx_probs_rdf.directives import PROBS
SYS = Namespace("http://example.org/system/")
| 36.833333 | 153 | 0.764706 |
077afe0d8f015a761ad56ef674705600c184e8fe
| 1,721 |
py
|
Python
|
analysis_functionarcademix.py
|
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
|
e56e0e853aca4367ebf99ae18e920b80f39bd133
|
[
"MIT"
] | null | null | null |
analysis_functionarcademix.py
|
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
|
e56e0e853aca4367ebf99ae18e920b80f39bd133
|
[
"MIT"
] | null | null | null |
analysis_functionarcademix.py
|
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
|
e56e0e853aca4367ebf99ae18e920b80f39bd133
|
[
"MIT"
] | null | null | null |
# analysis function for the three-level game
| 30.732143 | 72 | 0.613016 |
077b64f9f341be6f03c89ac88afd5ce1383da321
| 2,246 |
py
|
Python
|
Hello_Cone.py
|
TechnoTanuki/Python_BMP
|
d6f7e7a4b74f7d6e8761d618c156d37c97726038
|
[
"MIT"
] | 3 |
2022-02-24T15:46:43.000Z
|
2022-03-30T13:17:03.000Z
|
Hello_Cone.py
|
TechnoTanuki/Python_BMP
|
d6f7e7a4b74f7d6e8761d618c156d37c97726038
|
[
"MIT"
] | null | null | null |
Hello_Cone.py
|
TechnoTanuki/Python_BMP
|
d6f7e7a4b74f7d6e8761d618c156d37c97726038
|
[
"MIT"
] | null | null | null |
notice = """
Cone Demo
-----------------------------------
| Copyright 2022 by Joel C. Alcarez |
| [[email protected]] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import(
newBMP,
centercoord,
plot3Dsolid,
getRGBfactors,
rotvec3D,
conevertandsurface,
saveBMP
)
import subprocess as proc
from os import path
if __name__=="__main__":
main()
| 38.724138 | 109 | 0.548531 |
077c2964f05f1e340c5f354633e006236a1d9021
| 2,001 |
py
|
Python
|
analysis/training_curve_6D.py
|
AndrewKirby2/data_synthesis
|
656858137a348fd5dcb57bcd04bdfece2b9eac1b
|
[
"MIT"
] | null | null | null |
analysis/training_curve_6D.py
|
AndrewKirby2/data_synthesis
|
656858137a348fd5dcb57bcd04bdfece2b9eac1b
|
[
"MIT"
] | null | null | null |
analysis/training_curve_6D.py
|
AndrewKirby2/data_synthesis
|
656858137a348fd5dcb57bcd04bdfece2b9eac1b
|
[
"MIT"
] | null | null | null |
""" Plot a training curve for the 6D data simulator of CT*
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
import sys
sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis')
from GP_machine_learning.GP_machine_learning_functions import *
from regular_array_sampling.functions import regular_array_monte_carlo
# create array to store results for plotting
rmse = np.ones((25, 2))
noise = 0.01
# create array of sampled regular array layouts
#cand_points = regular_array_monte_carlo(10000)
# create testing points
X_test, y_test = create_testing_points_regular(noise)
n = 0
n_target = 0
n_train = 0
while n_train < 200:
    n_target = 100 + 100*n
# create training points
X_train, y_train, n_train = \
create_training_points_irregular(n_target, noise)
# fit GP regression and calculate rmse
kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1])
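    # anisotropic RBF kernel (one length scale per input dimension) plus a white-noise term for observation noise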
pipe = Pipeline([('scaler', StandardScaler()),
('gp', GaussianProcessRegressor(kernel=kernel,
n_restarts_optimizer=20))])
pipe.fit(X_train, y_train)
y_predict = pipe.predict(X_test)
mse = mean_squared_error(y_test, y_predict)
# report rmse
print(n_train, np.sqrt(mse))
rmse[n, 0] = n_train
rmse[n, 1] = np.sqrt(mse)
n += 1
plt.scatter(rmse[:, 0], rmse[:, 1])
plt.yscale('log')
plt.ylim([1e-3, 1e-1])
plt.xlim([0, 200])
plt.title('Training curve RBF - 6D 1% noise - irregular array training - max change halved')
plt.ylabel('RMSE')
plt.xlabel('Training points')
plt.savefig('analysis/GP_machine_learning_plots/\
gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')
| 34.5 | 92 | 0.733633 |
077cc03c99f16d778bcf96bc07a8e66081bca025
| 5,210 |
py
|
Python
|
website/raspac.py
|
tpudlik/RaspAC
|
e0a01a8b9123e74f6e4fb53f084e4ddf3ea24677
|
[
"MIT"
] | 28 |
2015-04-03T05:01:14.000Z
|
2021-12-31T00:29:40.000Z
|
website/raspac.py
|
tpudlik/RaspAC
|
e0a01a8b9123e74f6e4fb53f084e4ddf3ea24677
|
[
"MIT"
] | 3 |
2017-03-20T15:47:24.000Z
|
2017-05-21T16:07:22.000Z
|
website/raspac.py
|
tpudlik/RaspAC
|
e0a01a8b9123e74f6e4fb53f084e4ddf3ea24677
|
[
"MIT"
] | 8 |
2015-08-27T07:33:08.000Z
|
2018-09-27T21:54:49.000Z
|
import sqlite3
import subprocess, datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
from tquery import get_latest_record
from config import *
app = Flask(__name__)
app.config.from_object(__name__)
# DB helper functions
def init_db():
"""Initializes the sqlite3 database. This function must be imported and
executed from the Python interpreter before the application is first run."""
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
# Auto-open and close DB when serving requests
def validate_AC_command(user_mode, user_temperature):
"""Validates and sanitizes user-input command; translates command
into irsend call."""
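    # e.g. a non-heat, non-off mode with temperature '70' yields the command suffix '70',
    # while 'heat' with '70' yields 'heat70' and 'off' yields 'off' (assuming the values pass the config checks)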
codes = dict()
if user_mode not in app.config['ACMODES']:
codes['mode_error'] = True
else:
codes['mode_error'] = False
    if user_mode != 'off' and user_temperature not in app.config['ACTEMPERATURES']:
codes['temperature_error'] = True
else:
codes['temperature_error'] = False
if not codes['mode_error'] and not codes['temperature_error']:
codes['mode'] = user_mode
codes['temperature'] = user_temperature
if codes['mode'] == 'off':
command_postfix = 'off'
elif codes['mode'] == 'heat':
command_postfix = 'heat' + codes['temperature']
else:
command_postfix = codes['temperature']
codes['command'] = command_postfix
return codes
def command_history():
"""Returns a list of dictionaries, each containing a command issued
to the AC previously. The list is ordered chronologically, from newest
to oldest."""
cur = g.db.execute('select command, ts, user from commands order by id desc')
command_history = []
for row in cur.fetchall():
if row[0][0] == 'h':
cmd = 'heat to ' + row[0][4:]
elif row[0] == 'off':
cmd = 'off'
else:
cmd = 'cool to ' + row[0]
command_history.append(dict(command=cmd, ts=row[1], user=row[2]))
return command_history
def last_record():
"""Returns the last temperature and humidity record data.
The returned object is a dict with keys ts, fahrenheit, celsius and
humidity.
"""
db_record = get_latest_record()
out_record = dict()
out_record['date'] = db_record[0].strftime("%Y-%m-%d")
out_record['time'] = db_record[0].strftime("%H:%M")
out_record['celsius'] = db_record[1]
out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32))
out_record['humidity'] = int(round(db_record[2]))
return out_record
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 37.482014 | 87 | 0.616507 |
077cca840f68935d92c69072c7307ddde041c399
| 818 |
py
|
Python
|
tests/util_test.py
|
NLESC-JCER/pyspectra
|
b7ece1fff537039f3306b23e00812aa1c8ffc729
|
[
"Apache-2.0"
] | 1 |
2021-01-05T01:52:31.000Z
|
2021-01-05T01:52:31.000Z
|
tests/util_test.py
|
NLESC-JCER/pyspectra
|
b7ece1fff537039f3306b23e00812aa1c8ffc729
|
[
"Apache-2.0"
] | 2 |
2020-07-03T07:54:46.000Z
|
2020-07-03T07:59:08.000Z
|
tests/util_test.py
|
NLESC-JCER/pyspectra
|
b7ece1fff537039f3306b23e00812aa1c8ffc729
|
[
"Apache-2.0"
] | null | null | null |
"""Helper functions to tests."""
import numpy as np
def norm(vs: np.array) -> float:
"""Compute the norm of a vector."""
return np.sqrt(np.dot(vs, vs))
def create_random_matrix(size: int) -> np.array:
"""Create a numpy random matrix."""
return np.random.normal(size=size ** 2).reshape(size, size)
def create_symmetic_matrix(size: int) -> np.array:
"""Create a numpy symmetric matrix."""
xs = create_random_matrix(size)
return xs + xs.T
def check_eigenpairs(
matrix: np.ndarray, eigenvalues: np.ndarray,
eigenvectors: np.ndarray) -> bool:
"""Check that the eigenvalue equation holds."""
for i, value in enumerate(eigenvalues):
residue = np.dot(
matrix, eigenvectors[:, i]) - value * eigenvectors[:, i]
assert norm(residue) < 1e-8
| 27.266667 | 68 | 0.640587 |
077e6b7b62074f7defc4bfc023b3cef03e6c40c9
| 1,046 |
py
|
Python
|
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py
|
leetcode-notebook/wonz
|
9ffd2ce9b5f3a544ee958f5a0673215afd176c2b
|
[
"MIT"
] | 12 |
2020-04-21T01:09:14.000Z
|
2022-01-13T08:42:03.000Z
|
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py
|
leetcode-notebook/wonz
|
9ffd2ce9b5f3a544ee958f5a0673215afd176c2b
|
[
"MIT"
] | null | null | null |
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py
|
leetcode-notebook/wonz
|
9ffd2ce9b5f3a544ee958f5a0673215afd176c2b
|
[
"MIT"
] | 4 |
2020-03-31T03:06:16.000Z
|
2021-07-06T07:27:44.000Z
|
from typing import List
if __name__ == "__main__":
nums = [2, 3, 1, 0, 2, 5, 3]
print(Solution().findRepeatNumber(nums))
| 26.15 | 63 | 0.43499 |
07800b91cf15e2b3fdf48ab87571db57cf0566dc
| 1,702 |
py
|
Python
|
examples/test_network.py
|
Charles-Peeke/gwu_nn
|
3f5e9937abf2bfb81a74a2d6f3653a661e705f67
|
[
"MIT"
] | 4 |
2020-11-17T00:31:40.000Z
|
2021-11-11T01:56:27.000Z
|
examples/test_network.py
|
Charles-Peeke/gwu_nn
|
3f5e9937abf2bfb81a74a2d6f3653a661e705f67
|
[
"MIT"
] | 1 |
2020-10-12T17:41:40.000Z
|
2020-10-12T17:41:40.000Z
|
examples/test_network.py
|
jdk514/gwu_nn
|
3f5e9937abf2bfb81a74a2d6f3653a661e705f67
|
[
"MIT"
] | 5 |
2020-11-12T21:13:35.000Z
|
2021-11-30T22:15:28.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from gwu_nn.gwu_network import GWUNetwork
from gwu_nn.layers import Dense
from gwu_nn.activation_layers import Sigmoid
np.random.seed(8)
num_obs = 8000
# Create our features to draw from two distinct 2D normal distributions
x1 = np.random.multivariate_normal([0, 0], [[1, .75],[.75, 1]], num_obs)
x2 = np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]], num_obs)
# Stack our inputs into one feature space
X = np.vstack((x1, x2))
print(X.shape)
y = np.hstack((np.zeros(num_obs), np.ones(num_obs)))
print(y.shape)
# colors = ['red'] * num_obs + ['blue'] * num_obs
# plt.figure(figsize=(12,8))
# plt.scatter(X[:, 0], X[:, 1], c = colors, alpha = 0.5)
# Let's randomly split things into training and testing sets so we don't cheat
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Create our model
network = GWUNetwork()
network.add(Dense(2, 1, True, 'sigmoid'))
network.add(Sigmoid())
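# a single 2-input, 1-output dense layer followed by a sigmoid is effectively a logistic-regression classifier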
#network.set_loss('mse')
network.compile('log_loss', 0.001)
network.fit(X_train, y_train, epochs=100)
from scipy.special import logit
colors = ['red'] * num_obs + ['blue'] * num_obs
plt.figure(figsize=(12, 8))
plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5)
# Range of our X values
start_x1 = -5
end_x1 = 7
weights = network.layers[0].weights.reshape(-1).tolist()
bias = network.layers[0].bias[0][0]
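# the decision boundary satisfies w0*x1 + w1*x2 + b = logit(0.5) = 0; solve for x2 at the two x1 endpoints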
start_y = (bias + start_x1 * weights[0] - logit(0.5)) / - weights[1]
end_y = (bias + end_x1 * weights[0] - logit(0.5)) / -weights[1]
plt.plot([start_x1, end_x1], [start_y, end_y], color='grey')
| 30.392857 | 91 | 0.675676 |
07806fd3652fda3f2cbc32b699b1c68679d17f6c
| 6,227 |
py
|
Python
|
scattering/van_hove.py
|
XiaoboLinlin/scattering
|
0173b63f3243bdbcccfa562dbf5e3714920cded2
|
[
"MIT"
] | null | null | null |
scattering/van_hove.py
|
XiaoboLinlin/scattering
|
0173b63f3243bdbcccfa562dbf5e3714920cded2
|
[
"MIT"
] | null | null | null |
scattering/van_hove.py
|
XiaoboLinlin/scattering
|
0173b63f3243bdbcccfa562dbf5e3714920cded2
|
[
"MIT"
] | null | null | null |
import itertools as it
import numpy as np
import mdtraj as md
from progressbar import ProgressBar
from scattering.utils.utils import get_dt
from scattering.utils.constants import get_form_factor
def compute_van_hove(trj, chunk_length, water=False,
r_range=(0, 1.0), bin_width=0.005, n_bins=None,
self_correlation=True, periodic=True, opt=True, partial=False):
"""Compute the partial van Hove function of a trajectory
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
water : bool
use X-ray form factors for water that account for polarization
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0]))
partial_dict = dict()
for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
print('doing {0} and {1} ...'.format(elem1, elem2))
r, g_r_t_partial = compute_partial_van_hove(trj=trj,
chunk_length=chunk_length,
selection1='element {}'.format(elem1.symbol),
selection2='element {}'.format(elem2.symbol),
r_range=r_range,
bin_width=bin_width,
n_bins=n_bins,
self_correlation=self_correlation,
periodic=periodic,
opt=opt)
partial_dict[(elem1, elem2)] = g_r_t_partial
if partial:
return partial_dict
norm = 0
g_r_t = None
for key, val in partial_dict.items():
elem1, elem2 = key
concentration1 = trj.atom_slice(trj.top.select('element {}'.format(elem1.symbol))).n_atoms / n_physical_atoms
concentration2 = trj.atom_slice(trj.top.select('element {}'.format(elem2.symbol))).n_atoms / n_physical_atoms
form_factor1 = get_form_factor(element_name=elem1.symbol, water=water)
form_factor2 = get_form_factor(element_name=elem2.symbol, water=water)
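        # weight this partial by the product of the two species' X-ray form factors and number concentrations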
coeff = form_factor1 * concentration1 * form_factor2 * concentration2
if g_r_t is None:
g_r_t = np.zeros_like(val)
g_r_t += val * coeff
norm += coeff
# Reshape g_r_t to better represent the discretization in both r and t
g_r_t_final = np.empty(shape=(chunk_length, len(r)))
for i in range(chunk_length):
g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)
g_r_t_final /= norm
t = trj.time[:chunk_length]
return r, t, g_r_t_final
def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None,
r_range=(0, 1.0), bin_width=0.005, n_bins=200,
self_correlation=True, periodic=True, opt=True):
"""Compute the partial van Hove function of a trajectory
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
selection1 : str
selection to be considered, in the style of MDTraj atom selection
selection2 : str
selection to be considered, in the style of MDTraj atom selection
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
unique_elements = (
set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
)
if any([len(val) > 1 for val in unique_elements]):
raise UserWarning(
            'Multiple elements found in one or more selections. Results may '
            'not be directly comparable to scattering experiments.'
)
# Don't need to store it, but this serves to check that dt is constant
dt = get_dt(trj)
pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)
n_chunks = int(trj.n_frames / chunk_length)
g_r_t = None
pbar = ProgressBar()
for i in pbar(range(n_chunks)):
times = list()
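        # pair the first frame of the chunk with every frame in the chunk, so index j corresponds to a lag of j frames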
for j in range(chunk_length):
times.append([chunk_length*i, chunk_length*i+j])
r, g_r_t_frame = md.compute_rdf_t(
traj=trj,
pairs=pairs,
times=times,
r_range=r_range,
bin_width=bin_width,
n_bins=n_bins,
period_length=chunk_length,
self_correlation=self_correlation,
periodic=periodic,
opt=opt,
)
if g_r_t is None:
g_r_t = np.zeros_like(g_r_t_frame)
g_r_t += g_r_t_frame
return r, g_r_t
| 36.629412 | 117 | 0.604464 |
07807421fe3926d24423bc71ab071f0fe56cfec8
| 785 |
py
|
Python
|
nn_benchmark/networks/__init__.py
|
QDucasse/nn_benchmark
|
0a32db241e75853c7d78dccf6d7b6940e5a0e4d0
|
[
"MIT"
] | 18 |
2020-06-10T10:30:55.000Z
|
2022-02-21T08:28:00.000Z
|
nn_benchmark/networks/__init__.py
|
QDucasse/nn_benchmark
|
0a32db241e75853c7d78dccf6d7b6940e5a0e4d0
|
[
"MIT"
] | 29 |
2020-06-24T09:09:55.000Z
|
2021-09-09T10:10:21.000Z
|
nn_benchmark/networks/__init__.py
|
QDucasse/nn_benchmark
|
0a32db241e75853c7d78dccf6d7b6940e5a0e4d0
|
[
"MIT"
] | 2 |
2020-10-30T23:36:42.000Z
|
2021-09-07T12:58:06.000Z
|
# -*- coding: utf-8 -*-
# nn_benchmark
# author - Quentin Ducasse
# https://github.com/QDucasse
# [email protected]
from __future__ import absolute_import
__all__ = ["lenet","lenet5","quant_lenet5",
"quant_cnv", "quant_tfc",
"mobilenetv1","quant_mobilenetv1",
"vggnet", "quant_vggnet",
"common", "alexnet", "quant_alexnet"]
from .alexnet import *
from .lenet import *
from .lenet5 import *
from .mobilenetv1 import *
from .quant_mobilenetv1 import *
from .quant_alexnet import *
from .quant_lenet5 import *
from .quant_cnv import *
from .quant_tfc import *
from .vggnet import *
from .quant_vggnet import *
from .common import *
| 28.035714 | 48 | 0.614013 |
078077ca30c799c1dd1930850adac76494c46916
| 820 |
py
|
Python
|
Section1_Basics/contours.py
|
NeeharikaDva/opencv_course
|
234515ab59a1228c8dfd3c69f310dbc1d86c6089
|
[
"MIT"
] | null | null | null |
Section1_Basics/contours.py
|
NeeharikaDva/opencv_course
|
234515ab59a1228c8dfd3c69f310dbc1d86c6089
|
[
"MIT"
] | null | null | null |
Section1_Basics/contours.py
|
NeeharikaDva/opencv_course
|
234515ab59a1228c8dfd3c69f310dbc1d86c6089
|
[
"MIT"
] | null | null | null |
#pylint:disable=no-member
import cv2 as cv
import numpy as np
img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
#
blank = np.zeros(img.shape[:2], dtype='uint8')
cv.imshow('Blank', blank)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
#
blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)
canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)
#
ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)
#
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contour(s) found!')
#
cv.drawContours(blank, contours, -1, (200,120,100), 1)
cv.imshow('Contours Drawn', blank)
cv.waitKey(0)
| 27.333333 | 114 | 0.729268 |
078092a018208fa66acdafddfde2bd709ed8859d
| 51 |
py
|
Python
|
mmdet/ops/fcosr_tools/__init__.py
|
RangeKing/FCOSR
|
b92f0cee2e89d6a268884bacd02fb28881cd44a4
|
[
"Apache-2.0"
] | 38 |
2021-11-23T02:58:47.000Z
|
2022-03-21T08:16:59.000Z
|
mmdet/ops/fcosr_tools/__init__.py
|
RangeKing/FCOSR
|
b92f0cee2e89d6a268884bacd02fb28881cd44a4
|
[
"Apache-2.0"
] | 15 |
2021-12-06T07:51:04.000Z
|
2022-03-29T11:35:29.000Z
|
mmdet/ops/fcosr_tools/__init__.py
|
RangeKing/FCOSR
|
b92f0cee2e89d6a268884bacd02fb28881cd44a4
|
[
"Apache-2.0"
] | 12 |
2021-11-23T02:58:50.000Z
|
2022-03-24T01:18:23.000Z
|
from . import fcosr_tools
__all__ = ['fcosr_tools']
| 25.5 | 25 | 0.764706 |
0780e5ec41de515a271f19ff0412527f1e12bd8f
| 287 |
py
|
Python
|
health_care/health_care/doctype/practitioner/practitioner.py
|
Jnalis/frappe-health-care
|
ed347c216f568cc044c1365965d35945697cf7dc
|
[
"MIT"
] | null | null | null |
health_care/health_care/doctype/practitioner/practitioner.py
|
Jnalis/frappe-health-care
|
ed347c216f568cc044c1365965d35945697cf7dc
|
[
"MIT"
] | null | null | null |
health_care/health_care/doctype/practitioner/practitioner.py
|
Jnalis/frappe-health-care
|
ed347c216f568cc044c1365965d35945697cf7dc
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022, Juve and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
| 28.7 | 77 | 0.777003 |
07827562174365b2eae2774f05f5a4b7e0a35d24
| 1,617 |
py
|
Python
|
install-hooks.py
|
JustasGau/DonjinKrawler
|
faff50dcfcebf82028c9af10434359f975247d33
|
[
"MIT"
] | null | null | null |
install-hooks.py
|
JustasGau/DonjinKrawler
|
faff50dcfcebf82028c9af10434359f975247d33
|
[
"MIT"
] | 9 |
2020-10-11T13:55:12.000Z
|
2020-12-09T16:28:06.000Z
|
install-hooks.py
|
JustasGau/DonjinKrawler
|
faff50dcfcebf82028c9af10434359f975247d33
|
[
"MIT"
] | null | null | null |
import sys
from os import path
import urllib; from urllib.request import urlretrieve
from subprocess import call
if __name__ == "__main__":
if (len(sys.argv) < 2):
print("Enter a directory to install hooks")
else:
if (path.exists(sys.argv[1])):
install_hooks(sys.argv[1])
| 52.16129 | 150 | 0.678417 |
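The install_hooks function called in the snippet above is not part of the excerpt. Purely as an illustration of what such a helper could look like given the urlretrieve import, here is a hypothetical sketch; the hook name and URL are invented placeholders, not the repository's actual values:

import os
import stat
from urllib.request import urlretrieve

def install_hooks(repo_dir):
    # Hypothetical sketch only: download a hook script into .git/hooks and
    # mark it executable. The URL and hook file name are placeholders.
    hooks_dir = os.path.join(repo_dir, ".git", "hooks")
    os.makedirs(hooks_dir, exist_ok=True)
    target = os.path.join(hooks_dir, "pre-commit")
    urlretrieve("https://example.com/hooks/pre-commit", target)
    os.chmod(target, os.stat(target).st_mode | stat.S_IXUSR)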
078375b28d8a3c360255f33574ef015988b175c4
| 3,962 |
py
|
Python
|
09_MicroServer_Cookies/micro_server.py
|
Rockfish/PythonCourse
|
1d650e49950d1987d052028139fcdfcb0bbfcc70
|
[
"MIT"
] | null | null | null |
09_MicroServer_Cookies/micro_server.py
|
Rockfish/PythonCourse
|
1d650e49950d1987d052028139fcdfcb0bbfcc70
|
[
"MIT"
] | null | null | null |
09_MicroServer_Cookies/micro_server.py
|
Rockfish/PythonCourse
|
1d650e49950d1987d052028139fcdfcb0bbfcc70
|
[
"MIT"
] | null | null | null |
"""
Micro webapp based on WebOb, Jinja2, WSGI with a simple router
"""
import os
import hmac
import hashlib
import mimetypes
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from webob import Request
from webob import Response
from jinja2 import Environment, FileSystemLoader
| 33.576271 | 100 | 0.649419 |
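Only the imports of the micro framework are shown above. A self-contained sketch of the WebOb + WSGI pattern those imports imply; the route table, handler and port are assumptions, not the record's actual code:

from wsgiref.simple_server import make_server
from webob import Request, Response

ROUTES = {}

def route(path):
    # Register a handler for a fixed path in a simple route table.
    def decorator(func):
        ROUTES[path] = func
        return func
    return decorator

@route("/")
def index(request):
    return Response("hello from the micro server")

def application(environ, start_response):
    # Wrap the raw WSGI environ in a WebOb Request and dispatch on path.
    request = Request(environ)
    handler = ROUTES.get(request.path_info)
    response = handler(request) if handler else Response("not found", status=404)
    return response(environ, start_response)

if __name__ == "__main__":
    make_server("localhost", 8000, application).serve_forever()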
0784261b13ef0cb016c21f71e6bc994a164d136e
| 2,234 |
py
|
Python
|
apps/addons/management/commands/jetpackers.py
|
clouserw/olympia
|
1d5755b08a526372ec66e6bc64ab636018181969
|
[
"BSD-3-Clause"
] | 1 |
2017-07-14T19:22:39.000Z
|
2017-07-14T19:22:39.000Z
|
apps/addons/management/commands/jetpackers.py
|
clouserw/olympia
|
1d5755b08a526372ec66e6bc64ab636018181969
|
[
"BSD-3-Clause"
] | 6 |
2021-02-02T23:08:48.000Z
|
2021-09-08T02:47:17.000Z
|
apps/addons/management/commands/jetpackers.py
|
clouserw/olympia
|
1d5755b08a526372ec66e6bc64ab636018181969
|
[
"BSD-3-Clause"
] | 1 |
2021-03-13T00:33:12.000Z
|
2021-03-13T00:33:12.000Z
|
import logging
from django.core import mail
from django.conf import settings
from django.core.management.base import BaseCommand
import amo.utils
from users.models import UserProfile
log = logging.getLogger('z.mailer')
FROM = settings.DEFAULT_FROM_EMAIL
SUBJECT = 'Instructions for Automatic Upgrade to Add-on SDK 1.0'
MSG = """\
Hello Mozilla Add-ons Developer!
With the final version of the Add-on SDK only a week away, we wanted to
get in touch with all add-on developers who have existing SDK-based
(Jetpack) add-ons. We would like you to know that going forward AMO
will be auto-updating add-ons with new versions of the Add-on SDK upon
release.
To ensure that your add-on(s) are auto-updated with the 1.0 final
version of the SDK, we would ask that you download the latest release
candidate build -
https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz,
https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip
- and update your add-on(s) on AMO. After the 1.0 release, we will scan
our add-ons database and automatically upgrade any SDK-based add-ons we
find that are using verions 1.0RC2 or greater to the 1.0 final version
of the SDK. Any add-ons we find using versions of the SDK below 1.0RC2
will not be auto-updated and you will need to upgrade them to the 1.0
version of the SDK manually.
Thank you for participating in the early stages of the Add-on SDK's
development. Feedback and engagement from developers like you are the
foundations for success in our open source community!
Sincerely,
The Mozilla Add-ons Team
"""
| 34.369231 | 80 | 0.717995 |
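The management command class itself is outside the excerpt. A hedged sketch of how the message above might be sent with the imported Django mail API; the queryset filter and error handling are assumptions, not the repository's actual command:

class Command(BaseCommand):
    help = "Email Jetpack add-on developers about the SDK 1.0 auto-upgrade."

    def handle(self, *args, **options):
        # Assumption: every profile with an email address is notified; the
        # real command may instead filter on SDK-based add-on authorship.
        emails = (UserProfile.objects.filter(email__isnull=False)
                             .values_list("email", flat=True))
        for email in emails:
            try:
                mail.send_mail(SUBJECT, MSG, FROM, [email])
            except Exception:
                log.error("Failed to email %s" % email)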
07851008cd92498c823e9a48b615278bda99bf7d
| 49,605 |
py
|
Python
|
astroplan/constraints.py
|
edose/astroplan
|
b3cf55340c50ccf69ec363889c1fe8ff2f93cada
|
[
"BSD-3-Clause"
] | 160 |
2015-09-09T00:07:34.000Z
|
2022-03-15T22:22:49.000Z
|
astroplan/constraints.py
|
edose/astroplan
|
b3cf55340c50ccf69ec363889c1fe8ff2f93cada
|
[
"BSD-3-Clause"
] | 414 |
2015-08-25T20:22:09.000Z
|
2022-03-31T13:01:10.000Z
|
astroplan/constraints.py
|
edose/astroplan
|
b3cf55340c50ccf69ec363889c1fe8ff2f93cada
|
[
"BSD-3-Clause"
] | 90 |
2015-08-27T20:53:48.000Z
|
2022-01-25T06:11:16.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Specify and constraints to determine which targets are observable for
an observer.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
from abc import ABCMeta, abstractmethod
import datetime
import time
import warnings
# Third-party
from astropy.time import Time
import astropy.units as u
from astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord
from astropy import table
import numpy as np
from numpy.lib.stride_tricks import as_strided
# Package
from .moon import moon_illumination
from .utils import time_grid_from_range
from .target import get_skycoord
__all__ = ["AltitudeConstraint", "AirmassConstraint", "AtNightConstraint",
"is_observable", "is_always_observable", "time_grid_from_range",
"GalacticLatitudeConstraint", "SunSeparationConstraint",
"MoonSeparationConstraint", "MoonIlluminationConstraint",
"LocalTimeConstraint", "PrimaryEclipseConstraint",
"SecondaryEclipseConstraint", "Constraint", "TimeConstraint",
"observability_table", "months_observable", "max_best_rescale",
"min_best_rescale", "PhaseConstraint", "is_event_observable"]
_current_year = time.localtime().tm_year # needed for backward compatibility
_current_year_time_range = Time( # needed for backward compatibility
[str(_current_year) + '-01-01',
str(_current_year) + '-12-31']
)
def _make_cache_key(times, targets):
"""
Make a unique key to reference this combination of ``times`` and ``targets``.
Often, we wish to store expensive calculations for a combination of
``targets`` and ``times`` in a cache on an ``observer``` object. This
routine will provide an appropriate, hashable, key to store these
calculations in a dictionary.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
targets : `~astropy.coordinates.SkyCoord`
Target or list of targets.
Returns
-------
cache_key : tuple
A hashable tuple for use as a cache key
"""
# make a tuple from times
try:
timekey = tuple(times.jd) + times.shape
except BaseException: # must be scalar
timekey = (times.jd,)
# make hashable thing from targets coords
try:
if hasattr(targets, 'frame'):
# treat as a SkyCoord object. Accessing the longitude
# attribute of the frame data should be unique and is
# quicker than accessing the ra attribute.
targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape
else:
# assume targets is a string.
targkey = (targets,)
except BaseException:
targkey = (targets.frame.data.lon,)
return timekey + targkey
def _get_altaz(times, observer, targets, force_zero_pressure=False):
"""
Calculate alt/az for ``target`` at times linearly spaced between
the two times in ``time_range`` with grid spacing ``time_resolution``
for ``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets.
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``.
force_zero_pressure : bool
Forcefully use 0 pressure.
Returns
-------
altaz_dict : dict
Dictionary containing two key-value pairs. (1) 'times' contains the
times for the alt/az computations, (2) 'altaz' contains the
corresponding alt/az coordinates at those times.
"""
if not hasattr(observer, '_altaz_cache'):
observer._altaz_cache = {}
# convert times, targets to tuple for hashing
aakey = _make_cache_key(times, targets)
if aakey not in observer._altaz_cache:
try:
if force_zero_pressure:
observer_old_pressure = observer.pressure
observer.pressure = 0
altaz = observer.altaz(times, targets, grid_times_targets=False)
observer._altaz_cache[aakey] = dict(times=times,
altaz=altaz)
finally:
if force_zero_pressure:
observer.pressure = observer_old_pressure
return observer._altaz_cache[aakey]
def _get_moon_data(times, observer, force_zero_pressure=False):
"""
Calculate moon altitude az and illumination for an array of times for
``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``.
force_zero_pressure : bool
Forcefully use 0 pressure.
Returns
-------
moon_dict : dict
Dictionary containing three key-value pairs. (1) 'times' contains the
times for the computations, (2) 'altaz' contains the
corresponding alt/az coordinates at those times and (3) contains
the moon illumination for those times.
"""
if not hasattr(observer, '_moon_cache'):
observer._moon_cache = {}
# convert times to tuple for hashing
aakey = _make_cache_key(times, 'moon')
if aakey not in observer._moon_cache:
try:
if force_zero_pressure:
observer_old_pressure = observer.pressure
observer.pressure = 0
altaz = observer.moon_altaz(times)
illumination = np.array(moon_illumination(times))
observer._moon_cache[aakey] = dict(times=times,
illum=illumination,
altaz=altaz)
finally:
if force_zero_pressure:
observer.pressure = observer_old_pressure
return observer._moon_cache[aakey]
def _get_meridian_transit_times(times, observer, targets):
"""
Calculate next meridian transit for an array of times for ``targets`` and
``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
Returns
-------
time_dict : dict
Dictionary containing a key-value pair. 'times' contains the
meridian_transit times.
"""
if not hasattr(observer, '_meridian_transit_cache'):
observer._meridian_transit_cache = {}
# convert times to tuple for hashing
aakey = _make_cache_key(times, targets)
if aakey not in observer._meridian_transit_cache:
meridian_transit_times = observer.target_meridian_transit_time(times, targets)
observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times)
return observer._meridian_transit_cache[aakey]
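    # NOTE: the compute_constraint method below references `self`; its
    # containing constraint class definition is elided from this excerpt.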
def compute_constraint(self, times, observer, targets):
solar_altitude = self._get_solar_altitudes(times, observer, targets)
mask = solar_altitude <= self.max_solar_altitude
return mask
class GalacticLatitudeConstraint(Constraint):
"""
Constrain the distance between the Galactic plane and some targets.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable Galactic latitude of target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
            Maximum acceptable Galactic latitude of target (inclusive).
`None` indicates no limit.
"""
self.min = min
self.max = max
class SunSeparationConstraint(Constraint):
"""
Constrain the distance between the Sun and some targets.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable separation between Sun and target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
Maximum acceptable separation between Sun and target (inclusive).
`None` indicates no limit.
"""
self.min = min
self.max = max
class MoonSeparationConstraint(Constraint):
"""
Constrain the distance between the Earth's moon and some targets.
"""
def __init__(self, min=None, max=None, ephemeris=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable separation between moon and target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
Maximum acceptable separation between moon and target (inclusive).
`None` indicates no limit.
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
"""
self.min = min
self.max = max
self.ephemeris = ephemeris
class MoonIlluminationConstraint(Constraint):
"""
Constrain the fractional illumination of the Earth's moon.
Constraint is also satisfied if the Moon has set.
"""
def __init__(self, min=None, max=None, ephemeris=None):
"""
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
`~astropy.coordinates.solar_system_ephemeris` (which is
set to 'builtin' by default).
"""
self.min = min
self.max = max
self.ephemeris = ephemeris
def compute_constraint(self, times, observer, targets):
# first is the moon up?
cached_moon = _get_moon_data(times, observer)
moon_alt = cached_moon['altaz'].alt
moon_down_mask = moon_alt < 0
moon_up_mask = moon_alt >= 0
illumination = cached_moon['illum']
if self.min is None and self.max is not None:
mask = (self.max >= illumination) | moon_down_mask
elif self.max is None and self.min is not None:
mask = (self.min <= illumination) & moon_up_mask
elif self.min is not None and self.max is not None:
mask = ((self.min <= illumination) &
(illumination <= self.max)) & moon_up_mask
else:
raise ValueError("No max and/or min specified in "
"MoonSeparationConstraint.")
return mask
class LocalTimeConstraint(Constraint):
"""
Constrain the observable hours.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~datetime.time`
Earliest local time (inclusive). `None` indicates no limit.
max : `~datetime.time`
Latest local time (inclusive). `None` indicates no limit.
Examples
--------
Constrain the observations to targets that are observable between
23:50 and 04:08 local time:
>>> from astroplan import Observer
>>> from astroplan.constraints import LocalTimeConstraint
>>> import datetime as dt
>>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii")
>>> # bound times between 23:50 and 04:08 local Hawaiian time
>>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8))
"""
self.min = min
self.max = max
if self.min is None and self.max is None:
raise ValueError("You must at least supply either a minimum or a maximum time.")
if self.min is not None:
if not isinstance(self.min, datetime.time):
raise TypeError("Time limits must be specified as datetime.time objects.")
if self.max is not None:
if not isinstance(self.max, datetime.time):
raise TypeError("Time limits must be specified as datetime.time objects.")
class TimeConstraint(Constraint):
"""Constrain the observing time to be within certain time limits.
An example use case for this class would be to associate an acceptable
time range with a specific observing block. This can be useful if not
all observing blocks are valid over the time limits used in calls
to `is_observable` or `is_always_observable`.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.time.Time`
Earliest time (inclusive). `None` indicates no limit.
max : `~astropy.time.Time`
Latest time (inclusive). `None` indicates no limit.
Examples
--------
Constrain the observations to targets that are observable between
2016-03-28 and 2016-03-30:
>>> from astroplan import Observer
>>> from astropy.time import Time
>>> subaru = Observer.at_site("Subaru")
>>> t1 = Time("2016-03-28T12:00:00")
>>> t2 = Time("2016-03-30T12:00:00")
>>> constraint = TimeConstraint(t1,t2)
"""
self.min = min
self.max = max
if self.min is None and self.max is None:
raise ValueError("You must at least supply either a minimum or a "
"maximum time.")
if self.min is not None:
if not isinstance(self.min, Time):
raise TypeError("Time limits must be specified as "
"astropy.time.Time objects.")
if self.max is not None:
if not isinstance(self.max, Time):
raise TypeError("Time limits must be specified as "
"astropy.time.Time objects.")
class PrimaryEclipseConstraint(Constraint):
"""
Constrain observations to times during primary eclipse.
"""
def __init__(self, eclipsing_system):
"""
Parameters
----------
eclipsing_system : `~astroplan.periodic.EclipsingSystem`
System which must be in primary eclipse.
"""
self.eclipsing_system = eclipsing_system
class SecondaryEclipseConstraint(Constraint):
"""
Constrain observations to times during secondary eclipse.
"""
def __init__(self, eclipsing_system):
"""
Parameters
----------
eclipsing_system : `~astroplan.periodic.EclipsingSystem`
System which must be in secondary eclipse.
"""
self.eclipsing_system = eclipsing_system
class PhaseConstraint(Constraint):
"""
Constrain observations to times in some range of phases for a periodic event
(e.g.~transiting exoplanets, eclipsing binaries).
"""
def __init__(self, periodic_event, min=None, max=None):
"""
Parameters
----------
periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass
System on which to compute the phase. For example, the system
could be an eclipsing or non-eclipsing binary, or exoplanet system.
min : float (optional)
Minimum phase (inclusive) on interval [0, 1). Default is zero.
max : float (optional)
Maximum phase (inclusive) on interval [0, 1). Default is one.
Examples
--------
To constrain observations on orbital phases between 0.4 and 0.6,
>>> from astroplan import PeriodicEvent
>>> from astropy.time import Time
>>> import astropy.units as u
>>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day)
>>> constraint = PhaseConstraint(binary, min=0.4, max=0.6)
The minimum and maximum phase must be described on the interval [0, 1).
To constrain observations on orbital phases between 0.6 and 1.2, for
example, you should subtract one from the second number:
>>> constraint = PhaseConstraint(binary, min=0.6, max=0.2)
"""
self.periodic_event = periodic_event
if (min < 0) or (min > 1) or (max < 0) or (max > 1):
raise ValueError('The minimum of the PhaseConstraint must be within'
' the interval [0, 1).')
self.min = min if min is not None else 0.0
self.max = max if max is not None else 1.0
def is_always_observable(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
A function to determine whether ``targets`` are always observable throughout
``time_range`` given constraints in the ``constraints_list`` for a
particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
ever_observable : list
List of booleans of same length as ``targets`` for whether or not each
target is observable in the time range given the constraints.
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
return np.all(constraint_arr, axis=1)
def is_observable(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
Determines if the ``targets`` are observable during ``time_range`` given
constraints in ``constraints_list`` for a particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
ever_observable : list
List of booleans of same length as ``targets`` for whether or not each
target is ever observable in the time range given the constraints.
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
return np.any(constraint_arr, axis=1)
def is_event_observable(constraints, observer, target, times=None,
times_ingress_egress=None):
"""
Determines if the ``target`` is observable at each time in ``times``, given
constraints in ``constraints`` for a particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target
times : `~astropy.time.Time` (optional)
Array of mid-event times on which to test the constraints
times_ingress_egress : `~astropy.time.Time` (optional)
Array of ingress and egress times for ``N`` events, with shape
(``N``, 2).
Returns
-------
event_observable : `~numpy.ndarray`
Array of booleans of same length as ``times`` for whether or not the
target is ever observable at each time, given the constraints.
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
if times is not None:
applied_constraints = [constraint(observer, target, times=times,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
else:
times_ing = times_ingress_egress[:, 0]
times_egr = times_ingress_egress[:, 1]
applied_constraints_ing = [constraint(observer, target, times=times_ing,
grid_times_targets=True)
for constraint in constraints]
applied_constraints_egr = [constraint(observer, target, times=times_egr,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing),
np.logical_and.reduce(applied_constraints_egr))
return constraint_arr
def months_observable(constraints, observer, targets,
time_range=_current_year_time_range,
time_grid_resolution=0.5*u.hour):
"""
Determines which month the specified ``targets`` are observable for a
specific ``observer``, given the supplied ``constraints``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence
If ``time_range`` is not specified, defaults to current year (localtime)
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
observable_months : list
List of sets of unique integers representing each month that a target is
observable, one set per target. These integers are 1-based so that
January maps to 1, February maps to 2, etc.
"""
# TODO: This method could be sped up a lot by dropping to the trigonometric
# altitude calculations.
if not hasattr(constraints, '__len__'):
constraints = [constraints]
times = time_grid_from_range(time_range, time_grid_resolution)
# TODO: This method could be sped up a lot by dropping to the trigonometric
# altitude calculations.
applied_constraints = [constraint(observer, targets,
times=times,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
months_observable = []
for target, observable in zip(targets, constraint_arr):
s = set([t.datetime.month for t in times[observable]])
months_observable.append(s)
return months_observable
def observability_table(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
Creates a table with information about observability for all the ``targets``
over the requested ``time_range``, given the constraints in
``constraints_list`` for ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`. If a single (scalar) time, the table
will be for a 24 hour period centered on that time.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
observability_table : `~astropy.table.Table`
A Table containing the observability information for each of the
``targets``. The table contains four columns with information about the
        target and its observability: ``'target name'``, ``'ever observable'``,
``'always observable'``, and ``'fraction of time observable'``. The
column ``'time observable'`` will also be present if the ``time_range``
is given as a scalar. It also contains metadata entries ``'times'``
(with an array of all the times), ``'observer'`` (the
`~astroplan.Observer` object), and ``'constraints'`` (containing the
supplied ``constraints``).
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
is_24hr_table = False
if hasattr(time_range, 'isscalar') and time_range.isscalar:
time_range = (time_range-12*u.hour, time_range+12*u.hour)
is_24hr_table = True
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
colnames = ['target name', 'ever observable', 'always observable',
'fraction of time observable']
target_names = [target.name for target in targets]
ever_obs = np.any(constraint_arr, axis=1)
always_obs = np.all(constraint_arr, axis=1)
frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1]
tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs,
frac_obs])
if times is None and time_range is not None:
times = time_grid_from_range(time_range,
time_resolution=time_grid_resolution)
if is_24hr_table:
tab['time observable'] = tab['fraction of time observable'] * 24*u.hour
tab.meta['times'] = times.datetime
tab.meta['observer'] = observer
tab.meta['constraints'] = constraints
return tab
def min_best_rescale(vals, min_val, max_val, less_than_min=1):
"""
rescales an input array ``vals`` to be a score (between zero and one),
where the ``min_val`` goes to one, and the ``max_val`` goes to zero.
Parameters
----------
vals : array-like
the values that need to be rescaled to be between 0 and 1
    min_val : float
        best value cared about (rescales to 1)
    max_val : float
        worst acceptable value (rescales to 0)
less_than_min : 0 or 1
what is returned for ``vals`` below ``min_val``. (in some cases
anything less than ``min_val`` should also return one,
in some cases it should return zero)
Returns
-------
array of floats between 0 and 1 inclusive rescaled so that
``vals`` equal to ``max_val`` equal 0 and those equal to
``min_val`` equal 1
Examples
--------
rescale airmasses to between 0 and 1, with the best (1)
and worst (2.25). All values outside the range should
return 0.
>>> from astroplan.constraints import min_best_rescale
>>> import numpy as np
>>> airmasses = np.array([1, 1.5, 2, 3, 0])
>>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP
array([ 1. , 0.6, 0.2, 0. , 0. ])
"""
rescaled = (vals - max_val) / (min_val - max_val)
below = vals < min_val
above = vals > max_val
rescaled[below] = less_than_min
rescaled[above] = 0
return rescaled
def max_best_rescale(vals, min_val, max_val, greater_than_max=1):
"""
rescales an input array ``vals`` to be a score (between zero and one),
where the ``max_val`` goes to one, and the ``min_val`` goes to zero.
Parameters
----------
vals : array-like
the values that need to be rescaled to be between 0 and 1
min_val : float
worst acceptable value (rescales to 0)
max_val : float
best value cared about (rescales to 1)
greater_than_max : 0 or 1
what is returned for ``vals`` above ``max_val``. (in some cases
anything higher than ``max_val`` should also return one,
in some cases it should return zero)
Returns
-------
array of floats between 0 and 1 inclusive rescaled so that
``vals`` equal to ``min_val`` equal 0 and those equal to
``max_val`` equal 1
Examples
--------
rescale an array of altitudes to be between 0 and 1,
with the best (60) going to 1 and worst (35) going to
0. For values outside the range, the rescale should
return 0 below 35 and 1 above 60.
>>> from astroplan.constraints import max_best_rescale
>>> import numpy as np
>>> altitudes = np.array([20, 30, 40, 45, 55, 70])
>>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP
array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ])
"""
rescaled = (vals - min_val) / (max_val - min_val)
below = vals < min_val
above = vals > max_val
rescaled[below] = 0
rescaled[above] = greater_than_max
return rescaled
| 37.353163 | 100 | 0.617579 |
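A short usage sketch tying the constraint classes and helper functions above together; the observing site, target names and dates are arbitrary examples, FixedTarget.from_name needs network access to resolve names, and some constraint classes used (e.g. AltitudeConstraint) are exported in __all__ but elided from this excerpt:

import astropy.units as u
from astropy.time import Time
from astroplan import Observer, FixedTarget
from astroplan.constraints import (AltitudeConstraint, AtNightConstraint,
                                   MoonSeparationConstraint, is_observable,
                                   observability_table)

observer = Observer.at_site("Subaru")
targets = [FixedTarget.from_name("Vega"), FixedTarget.from_name("Polaris")]
time_range = Time(["2016-03-28 12:00", "2016-03-29 12:00"])
constraints = [AltitudeConstraint(min=30 * u.deg),
               AtNightConstraint.twilight_astronomical(),
               MoonSeparationConstraint(min=20 * u.deg)]

# Which targets satisfy all constraints at any time / summarize per target.
print(is_observable(constraints, observer, targets, time_range=time_range))
print(observability_table(constraints, observer, targets, time_range=time_range))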
078516b1582562801fbf63851c5ac10efbd5d833
| 6,191 |
py
|
Python
|
backend/views.py
|
Raulios/django-blog
|
ff25c8f21a3f6644e77a2ef5bb7bf7026770e0c2
|
[
"MIT"
] | null | null | null |
backend/views.py
|
Raulios/django-blog
|
ff25c8f21a3f6644e77a2ef5bb7bf7026770e0c2
|
[
"MIT"
] | null | null | null |
backend/views.py
|
Raulios/django-blog
|
ff25c8f21a3f6644e77a2ef5bb7bf7026770e0c2
|
[
"MIT"
] | null | null | null |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from core.models import Post, Category, Tag
from backend.forms import PostForm, CategoryForm, TagForm
# Create your views here.
| 25.166667 | 75 | 0.663059 |
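Only the imports of this blog backend module are included. A hedged sketch of a typical paginated listing view built from those imports; the ordering, template path and page size are assumptions, not the repository's actual view code:

@login_required
def post_list(request):
    # Assumption: a simple "all posts, newest first" listing.
    posts = Post.objects.order_by('-id')
    paginator = Paginator(posts, 10)
    page = request.GET.get('page')
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'backend/post_list.html', {'posts': page_obj})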
0785423db820435be27b39e1842db52b66a25a8e
| 2,953 |
py
|
Python
|
tiktorch/server/session/process.py
|
FynnBe/tiktorch
|
60c6fa9700e7ff73e44338e8755c56c6e8846f2f
|
[
"MIT"
] | null | null | null |
tiktorch/server/session/process.py
|
FynnBe/tiktorch
|
60c6fa9700e7ff73e44338e8755c56c6e8846f2f
|
[
"MIT"
] | null | null | null |
tiktorch/server/session/process.py
|
FynnBe/tiktorch
|
60c6fa9700e7ff73e44338e8755c56c6e8846f2f
|
[
"MIT"
] | null | null | null |
import dataclasses
import io
import multiprocessing as _mp
import uuid
import zipfile
from concurrent.futures import Future
from multiprocessing.connection import Connection
from typing import List, Optional, Tuple
import numpy
from tiktorch import log
from tiktorch.rpc import Shutdown
from tiktorch.rpc import mp as _mp_rpc
from tiktorch.rpc.mp import MPServer
from tiktorch.server.reader import eval_model_zip
from .backend import base
from .rpc_interface import IRPCModelSession
def _run_model_session_process(
conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
):
try:
# from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
except ModuleNotFoundError:
pass # probably running on windows
if log_queue:
log.configure(log_queue)
session_proc = ModelSessionProcess(model_zip, devices)
srv = MPServer(session_proc, conn)
srv.listen()
def start_model_session_process(
model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
) -> Tuple[_mp.Process, IRPCModelSession]:
client_conn, server_conn = _mp.Pipe()
proc = _mp.Process(
target=_run_model_session_process,
name="ModelSessionProcess",
kwargs={"conn": server_conn, "devices": devices, "log_queue": log_queue, "model_zip": model_zip},
)
proc.start()
return proc, _mp_rpc.create_client(IRPCModelSession, client_conn)
| 30.132653 | 105 | 0.691162 |
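A hedged sketch of driving start_model_session_process from the snippet above; the model package path and device list are placeholders, and the methods available on the returned RPC client come from IRPCModelSession, which is not shown in this excerpt:

if __name__ == "__main__":
    # Placeholder path: a model package archived as a zip file.
    with open("model.zip", "rb") as f:
        model_zip = f.read()

    proc, session = start_model_session_process(model_zip, devices=["cpu"])
    # ... call methods defined on IRPCModelSession via `session` here ...
    proc.terminate()
    proc.join()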
078596cc2ee665e19eee2250f95d62feca0bd3b2
| 1,816 |
py
|
Python
|
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
|
dangerstudios/OpenPype
|
10ddcc4699137888616eec57cd7fac9648189714
|
[
"MIT"
] | null | null | null |
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
|
dangerstudios/OpenPype
|
10ddcc4699137888616eec57cd7fac9648189714
|
[
"MIT"
] | null | null | null |
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
|
dangerstudios/OpenPype
|
10ddcc4699137888616eec57cd7fac9648189714
|
[
"MIT"
] | null | null | null |
from openpype.modules.ftrack.lib import BaseEvent
from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import (
SyncToAvalonEvent
)
def register(session):
'''Register plugin. Called when used as an plugin.'''
DelAvalonIdFromNew(session).register()
| 33.018182 | 80 | 0.562225 |
0785a9ecd33b25242a059e89ddfae1ca8b3c0298
| 518 |
py
|
Python
|
tests/workflow/test_workflow_ingest_accepted_submission.py
|
elifesciences/elife-bot
|
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
|
[
"MIT"
] | 17 |
2015-02-10T07:10:29.000Z
|
2021-05-14T22:24:45.000Z
|
tests/workflow/test_workflow_ingest_accepted_submission.py
|
elifesciences/elife-bot
|
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
|
[
"MIT"
] | 459 |
2015-03-31T18:24:23.000Z
|
2022-03-30T19:44:40.000Z
|
tests/workflow/test_workflow_ingest_accepted_submission.py
|
elifesciences/elife-bot
|
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
|
[
"MIT"
] | 9 |
2015-04-18T16:57:31.000Z
|
2020-10-30T11:49:13.000Z
|
import unittest
import tests.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger
from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission
| 34.533333 | 88 | 0.783784 |
078638d293ec315b963165f9210a9060b0e09180
| 2,436 |
py
|
Python
|
go/token/views.py
|
lynnUg/vumi-go
|
852f906c46d5d26940bd6699f11488b73bbc3742
|
[
"BSD-3-Clause"
] | null | null | null |
go/token/views.py
|
lynnUg/vumi-go
|
852f906c46d5d26940bd6699f11488b73bbc3742
|
[
"BSD-3-Clause"
] | null | null | null |
go/token/views.py
|
lynnUg/vumi-go
|
852f906c46d5d26940bd6699f11488b73bbc3742
|
[
"BSD-3-Clause"
] | null | null | null |
from urllib import urlencode
import urlparse
from django.shortcuts import Http404, redirect
from django.contrib.auth.views import logout
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from vumi.utils import load_class_by_string
from go.base.utils import vumi_api
| 35.304348 | 79 | 0.70197 |
078810f30530e12e24a60251c7822cc072db8c3d
| 1,142 |
py
|
Python
|
typogrify/templatetags/typogrify_tags.py
|
tylerbutler/typogrify
|
7b7a67348a2d51400fd38c0b61e30e34ca98994e
|
[
"BSD-3-Clause"
] | null | null | null |
typogrify/templatetags/typogrify_tags.py
|
tylerbutler/typogrify
|
7b7a67348a2d51400fd38c0b61e30e34ca98994e
|
[
"BSD-3-Clause"
] | null | null | null |
typogrify/templatetags/typogrify_tags.py
|
tylerbutler/typogrify
|
7b7a67348a2d51400fd38c0b61e30e34ca98994e
|
[
"BSD-3-Clause"
] | null | null | null |
from typogrify.filters import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError
from functools import wraps
from django.conf import settings
from django import template
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
register = template.Library()
def make_safe(f):
    """
    A function wrapper to make typogrify play nice with django's
    unicode support.
    """
    # Minimal reconstruction: the original wrapper body is elided in this snippet.
    @wraps(f)
    def wrapper(text):
        try:
            return mark_safe(f(force_unicode(text)))
        except TypogrifyError:
            return mark_safe(force_unicode(text))
    wrapper.is_safe = True
    return wrapper
register.filter('amp', make_safe(amp))
register.filter('caps', make_safe(caps))
register.filter('initial_quotes', make_safe(initial_quotes))
register.filter('smartypants', make_safe(smartypants))
register.filter('titlecase', make_safe(titlecase))
register.filter('typogrify', make_safe(typogrify))
register.filter('widont', make_safe(widont))
| 27.853659 | 114 | 0.69965 |
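The template filters registered above wrap plain functions from typogrify.filters, so they can also be exercised directly outside a template; the sample strings are arbitrary:

from typogrify.filters import amp, typogrify, widont

sample = '"I am amazed" -- she said & left.'
print(typogrify(sample))                 # smart quotes, dashes, amp/widont combined
print(amp("Ham & Eggs"))                 # wraps the ampersand in <span class="amp">
print(widont("A very short headline"))   # keeps the last two words together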
0789092717eaaad8fa74b92820df7d2a61d9ba9b
| 18,065 |
py
|
Python
|
bvbabel/vmr.py
|
carbrock/bvbabel
|
baac12d106455e34d9924309eadb4df991d3d8c9
|
[
"MIT"
] | 7 |
2021-08-02T09:58:08.000Z
|
2022-03-17T21:13:34.000Z
|
bvbabel/vmr.py
|
carbrock/bvbabel
|
baac12d106455e34d9924309eadb4df991d3d8c9
|
[
"MIT"
] | 2 |
2021-08-09T14:57:38.000Z
|
2022-03-28T13:25:19.000Z
|
bvbabel/vmr.py
|
carbrock/bvbabel
|
baac12d106455e34d9924309eadb4df991d3d8c9
|
[
"MIT"
] | 4 |
2021-08-09T07:45:59.000Z
|
2022-03-22T23:02:15.000Z
|
"""Read, write, create Brainvoyager VMR file format."""
import struct
import numpy as np
from bvbabel.utils import (read_variable_length_string,
write_variable_length_string)
# =============================================================================
def read_vmr(filename):
"""Read Brainvoyager VMR file.
Parameters
----------
filename : string
Path to file.
Returns
-------
header : dictionary
Pre-data and post-data headers.
data : 3D numpy.array
Image data.
"""
header = dict()
with open(filename, 'rb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets,
# typically containing the whole brain (head) of subjects. The
# intensity values are stored as a series of bytes. See the V16 format
# for a version storing each intensity value with two bytes (short
# integers). The VMR format contains a small header followed by the
# actual data followed by a second, more extensive, header. The current
# version of VMR files is "4", which is only slightly different from
# version 3 (as indicated below). Version 3 added offset values to
# format 2 in order to represent large data sets efficiently, e.g. in
# the context of advanced segmentation processing. Compared to the
# original file version "1", file versions 2 and higher contain
# additional header information after the actual data ("post-data
# header"). This allows to read VMR data sets with minimal header
# checking if the extended information is not needed. The information
# in the post-data header contains position information (if available)
# and stores a series of spatial transformations, which might have been
# performed to the original data set ("history record"). The
# post-header data can be probably ignored for custom routines, but is
# important in BrainVoyager QX for spatial transformation and
# coregistration routines as well as for proper visualization.
# Expected binary data: unsigned short int (2 bytes)
data, = struct.unpack('<H', f.read(2))
header["File version"] = data
data, = struct.unpack('<H', f.read(2))
header["DimX"] = data
data, = struct.unpack('<H', f.read(2))
header["DimY"] = data
data, = struct.unpack('<H', f.read(2))
header["DimZ"] = data
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): Each data element (intensity value) is
# represented in 1 byte. The data is organized in three loops:
# DimZ
# DimY
# DimX
#
# The axes terminology follows the internal BrainVoyager (BV) format.
# The mapping to Talairach axes is as follows:
# BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space
# BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space
# BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space
# Expected binary data: unsigned char (1 byte)
data_img = np.zeros((header["DimZ"] * header["DimY"] * header["DimX"]),
dtype="<B")
for i in range(data_img.size):
data_img[i], = struct.unpack('<B', f.read(1))
data_img = np.reshape(
data_img, (header["DimZ"], header["DimY"], header["DimX"]))
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): The first four entries of the post-data
# header are new since file version "3" and contain offset values for
# each dimension as well as a value indicating the size of a cube with
# iso-dimensions to which the data set will be internally "expanded"
# for certain operations. The axes labels are in terms of
# BrainVoyager's internal format. These four entries are followed by
# scan position information from the original file headers, e.g. from
# DICOM files. The coordinate axes labels in these entries are not in
# terms of BrainVoyager's internal conventions but follow the DICOM
# standard. Then follows eventually a section listing spatial
# transformations which have been eventually performed to create the
# current VMR (e.g. ACPC transformation). Finally, additional
        # information further describes the data set, including the assumed
# left-right convention, the reference space (e.g. Talairach after
# normalization) and voxel resolution.
if header["File version"] >= 3:
# NOTE(Developer Guide 2.6): These four entries have been added in
# file version "3" with BrainVoyager QX 1.7. All other entries are
# identical to file version "2".
# Expected binary data: short int (2 bytes)
data, = struct.unpack('<h', f.read(2))
header["OffsetX"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetY"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetZ"] = data
data, = struct.unpack('<h', f.read(2))
header["FramingCubeDim"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PosInfosVerified"] = data
data, = struct.unpack('<i', f.read(4))
header["CoordinateSystem"] = data
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterX"] = data # First slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterY"] = data # First slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterZ"] = data # First slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterX"] = data # Last slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterY"] = data # Last slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterZ"] = data # Last slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["RowDirX"] = data # Slice row direction vector X component
data, = struct.unpack('<f', f.read(4))
header["RowDirY"] = data # Slice row direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["RowDirZ"] = data # Slice row direction vector Z component
data, = struct.unpack('<f', f.read(4))
header["ColDirX"] = data # Slice column direction vector X component
data, = struct.unpack('<f', f.read(4))
header["ColDirY"] = data # Slice column direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["ColDirZ"] = data # Slice column direction vector Z component
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NRows"] = data # Nr of rows of slice image matrix
data, = struct.unpack('<i', f.read(4))
header["NCols"] = data # Nr of columns of slice image matrix
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["FoVRows"] = data # Field of view extent in row direction [mm]
data, = struct.unpack('<f', f.read(4))
header["FoVCols"] = data # Field of view extent in column dir. [mm]
data, = struct.unpack('<f', f.read(4))
header["SliceThickness"] = data # Slice thickness [mm]
data, = struct.unpack('<f', f.read(4))
header["GapThickness"] = data # Gap thickness [mm]
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NrOfPastSpatialTransformations"] = data
if header["NrOfPastSpatialTransformations"] != 0:
# NOTE(Developer Guide 2.6): For each past transformation, the
# information specified in the following table is stored. The
# "type of transformation" is a value determining how many
# subsequent values define the transformation:
# "1": Rigid body+scale (3 translation, 3 rotation, 3 scale)
# "2": Affine transformation (16 values, 4x4 matrix)
# "4": Talairach transformation
# "5": Un-Talairach transformation (1 - 5 -> BV axes)
header["PastTransformation"] = []
for i in range(header["NrOfPastSpatialTransformations"]):
header["PastTransformation"].append(dict())
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["Name"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["Type"] = data
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["SourceFileName"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["NrOfValues"] = data
# Store transformation values as a list
trans_values = []
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
trans_values.append(data)
header["PastTransformation"][i]["Values"] = trans_values
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["LeftRightConvention"] = data # modified in v4
data, = struct.unpack('<B', f.read(1))
header["ReferenceSpaceVMR"] = data # new in v4
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeX"] = data # Voxel resolution along X axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeY"] = data # Voxel resolution along Y axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeZ"] = data # Voxel resolution along Z axis
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionVerified"] = data
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionInTALmm"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MinValue"] = data # 16-bit data min intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MeanValue"] = data # 16-bit data mean intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MaxValue"] = data # 16-bit data max intensity
return header, data_img
# =============================================================================
def write_vmr(filename, header, data_img):
"""Protocol to write Brainvoyager VMR file.
Parameters
----------
filename : string
Output filename.
header : dictionary
Header of VMR file.
data_img : numpy.array, 3D
Image.
"""
with open(filename, 'wb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# Expected binary data: unsigned short int (2 bytes)
data = header["File version"]
f.write(struct.pack('<H', data))
data = header["DimX"]
f.write(struct.pack('<H', data))
data = header["DimY"]
f.write(struct.pack('<H', data))
data = header["DimZ"]
f.write(struct.pack('<H', data))
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# Convert axes from Nifti standard back to BV standard
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
# Expected binary data: unsigned char (1 byte)
data_img = data_img.flatten()
for i in range(data_img.size):
f.write(struct.pack('<B', data_img[i]))
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
if header["File version"] >= 3:
# Expected binary data: short int (2 bytes)
data = header["OffsetX"]
f.write(struct.pack('<h', data))
data = header["OffsetY"]
f.write(struct.pack('<h', data))
data = header["OffsetZ"]
f.write(struct.pack('<h', data))
data = header["FramingCubeDim"]
f.write(struct.pack('<h', data))
# Expected binary data: int (4 bytes)
data = header["PosInfosVerified"]
f.write(struct.pack('<i', data))
data = header["CoordinateSystem"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["Slice1CenterX"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterY"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterZ"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterX"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterY"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterZ"]
f.write(struct.pack('<f', data))
data = header["RowDirX"]
f.write(struct.pack('<f', data))
data = header["RowDirY"]
f.write(struct.pack('<f', data))
data = header["RowDirZ"]
f.write(struct.pack('<f', data))
data = header["ColDirX"]
f.write(struct.pack('<f', data))
data = header["ColDirY"]
f.write(struct.pack('<f', data))
data = header["ColDirZ"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NRows"]
f.write(struct.pack('<i', data))
data = header["NCols"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["FoVRows"]
f.write(struct.pack('<f', data))
data = header["FoVCols"]
f.write(struct.pack('<f', data))
data = header["SliceThickness"]
f.write(struct.pack('<f', data))
data = header["GapThickness"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NrOfPastSpatialTransformations"]
f.write(struct.pack('<i', data))
if header["NrOfPastSpatialTransformations"] != 0:
for i in range(header["NrOfPastSpatialTransformations"]):
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["Name"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["Type"]
f.write(struct.pack('<i', data))
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["SourceFileName"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["NrOfValues"]
f.write(struct.pack('<i', data))
# Transformation values are stored as a list
trans_values = header["PastTransformation"][i]["Values"]
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
f.write(struct.pack('<f', trans_values[j]))
# Expected binary data: char (1 byte)
data = header["LeftRightConvention"]
f.write(struct.pack('<B', data))
data = header["ReferenceSpaceVMR"]
f.write(struct.pack('<B', data))
# Expected binary data: float (4 bytes)
data = header["VoxelSizeX"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeY"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeZ"]
f.write(struct.pack('<f', data))
# Expected binary data: char (1 byte)
data = header["VoxelResolutionVerified"]
f.write(struct.pack('<B', data))
data = header["VoxelResolutionInTALmm"]
f.write(struct.pack('<B', data))
# Expected binary data: int (4 bytes)
data = header["VMROrigV16MinValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MeanValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MaxValue"]
f.write(struct.pack('<i', data))
return print("VMR saved.")
| 45.049875 | 79 | 0.546305 |
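A short round-trip sketch for the reader and writer above; the file names and the masking threshold are placeholders:

header, data = read_vmr("sub-01_T1w.vmr")
print(header["DimX"], header["DimY"], header["DimZ"])

# e.g. zero out low-intensity voxels before writing a new file
data[data < 30] = 0
write_vmr("sub-01_T1w_masked.vmr", header, data)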
0789c9270fff78d0b163f2215a2a6a958e9cdb11
| 2,279 |
py
|
Python
|
example/image-classification/test_score.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 399 |
2017-05-30T05:12:48.000Z
|
2022-01-29T05:53:08.000Z
|
example/image-classification/test_score.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 187 |
2018-03-16T23:44:43.000Z
|
2021-12-14T21:19:54.000Z
|
example/image-classification/test_score.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 107 |
2017-05-30T05:53:22.000Z
|
2021-06-24T02:43:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
test pretrained models
"""
from __future__ import print_function
import mxnet as mx
from common import find_mxnet, modelzoo
from score import score
VAL_DATA='data/val-5k-256.rec'
if __name__ == '__main__':
gpus = mx.test_utils.list_gpus()
assert len(gpus) > 0
batch_size = 16 * len(gpus)
gpus = ','.join([str(i) for i in gpus])
kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
download_data()
test_imagenet1k_resnet(**kwargs)
test_imagenet1k_inception_bn(**kwargs)
| 36.174603 | 78 | 0.662571 |
078a7ff149f5f6902b3df48444c9f900c3b57349
| 139,271 |
py
|
Python
|
verticapy/vcolumn.py
|
vertica/vertica_ml_python
|
9e82dba94afe8447bfa2492f343af6669128e2fb
|
[
"Apache-2.0"
] | 7 |
2018-05-10T08:16:31.000Z
|
2018-05-15T00:59:26.000Z
|
verticapy/vcolumn.py
|
vertica/vertica_ml_python
|
9e82dba94afe8447bfa2492f343af6669128e2fb
|
[
"Apache-2.0"
] | 1 |
2018-05-15T00:15:35.000Z
|
2018-05-15T13:40:19.000Z
|
verticapy/vcolumn.py
|
vertica/vertica_ml_python
|
9e82dba94afe8447bfa2492f343af6669128e2fb
|
[
"Apache-2.0"
] | null | null | null |
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage of Vertica's
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a pipeline mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, re, decimal, warnings, datetime
from collections.abc import Iterable
from typing import Union
# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
##
#
# __ __ ______ ______ __ __ __ __ __ __ __
# /\ \ / / /\ ___\ /\ __ \ /\ \ /\ \/\ \ /\ "-./ \ /\ "-.\ \
# \ \ \'/ \ \ \____ \ \ \/\ \ \ \ \____ \ \ \_\ \ \ \ \-./\ \ \ \ \-. \
# \ \__| \ \_____\ \ \_____\ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_\\"\_\
# \/_/ \/_____/ \/_____/ \/_____/ \/_____/ \/_/ \/_/ \/_/ \/_/
#
#
# ---#
| 34.261009 | 378 | 0.486139 |
078a84b6d5f9b27e924368e0c1490273227caf78
| 6,918 |
py
|
Python
|
booktags/flaskapp/book/views.py
|
MagicSword/Booktags
|
44142e19aec5ce75266233964d7ab21503bbe57c
|
[
"Apache-1.1"
] | null | null | null |
booktags/flaskapp/book/views.py
|
MagicSword/Booktags
|
44142e19aec5ce75266233964d7ab21503bbe57c
|
[
"Apache-1.1"
] | 9 |
2019-12-20T15:24:38.000Z
|
2021-12-13T20:28:48.000Z
|
booktags/flaskapp/book/views.py
|
MagicSword/BookTags
|
44142e19aec5ce75266233964d7ab21503bbe57c
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
example.py
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: 2019 Miller
:license: BSD-3-Clause
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
from flask import render_template, redirect, request, url_for, flash,jsonify,current_app
from flask_login import login_user, logout_user, login_required, current_user
from . import book
from flask_sqlalchemy import get_debug_queries
from sqlalchemy.sql.expression import cast
from datatables import ColumnDT, DataTables
from .. import auth
from .. import db
from .forms import EditBookForm, HackmdMeta
# from booktags.db.basemodels import Book
from booktags.flaskapp.model.models import BookMain
# --------------------------------------------------------- common routines
# @book.route('/list/', methods=['GET', 'POST'])
# def list_book():
# """
#
# :param field: col name
# :param order: asc or desc
# :return: renew query
# """
# books = BookMain.get_all_book()
# return render_template('book/list_book.html',books=books)
if __name__ == '__main__':
pass
| 30.746667 | 88 | 0.645273 |
078ba56d9b68af88a26ed1e2d4bb4466a1a8bcb9
| 429 |
py
|
Python
|
narwhallet/core/kws/http/enumerations/mediatypes.py
|
Snider/narwhallet
|
0d528763c735f1e68b8264e302854d41e7cf1956
|
[
"MIT"
] | 3 |
2021-12-29T11:25:13.000Z
|
2022-01-16T13:57:17.000Z
|
narwhallet/core/kws/http/enumerations/mediatypes.py
|
Snider/narwhallet
|
0d528763c735f1e68b8264e302854d41e7cf1956
|
[
"MIT"
] | null | null | null |
narwhallet/core/kws/http/enumerations/mediatypes.py
|
Snider/narwhallet
|
0d528763c735f1e68b8264e302854d41e7cf1956
|
[
"MIT"
] | 1 |
2022-01-16T13:57:20.000Z
|
2022-01-16T13:57:20.000Z
|
from enum import Enum
| 23.833333 | 68 | 0.613054 |
078c924330df0f8ffe4dba08335fcd4ca824b7c3
| 125 |
py
|
Python
|
electrondiffraction/__init__.py
|
drix00/ElectronDiffraction
|
9dc258d90d0b73745b904b1bb6e1e3e794403a27
|
[
"Apache-2.0"
] | 1 |
2020-07-23T12:24:09.000Z
|
2020-07-23T12:24:09.000Z
|
python/xraynomenclature/xraynomenclature/__init__.py
|
drix00/xray-nomenclature
|
92d3e15b9ebb505482a084522c6639ba09bbdd02
|
[
"Apache-2.0"
] | null | null | null |
python/xraynomenclature/xraynomenclature/__init__.py
|
drix00/xray-nomenclature
|
92d3e15b9ebb505482a084522c6639ba09bbdd02
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = """Hendrix Demers"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 20.833333 | 43 | 0.648 |
078df566472a507b372dad23be527536aa2fa29f
| 4,082 |
py
|
Python
|
storelet.py
|
markembling/storelet
|
9951368e2f143855d2c14509bdb8cf796d6e54b8
|
[
"BSD-3-Clause"
] | 1 |
2015-09-07T17:19:40.000Z
|
2015-09-07T17:19:40.000Z
|
storelet.py
|
markembling/storelet
|
9951368e2f143855d2c14509bdb8cf796d6e54b8
|
[
"BSD-3-Clause"
] | 1 |
2016-01-05T13:18:16.000Z
|
2016-01-05T14:16:57.000Z
|
storelet.py
|
markembling/storelet
|
9951368e2f143855d2c14509bdb8cf796d6e54b8
|
[
"BSD-3-Clause"
] | 1 |
2019-02-21T09:20:48.000Z
|
2019-02-21T09:20:48.000Z
|
import os
import logging
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
__version__ = "0.1.8"
__author__ = "Mark Embling"
__email__ = "[email protected]"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
LOGGING_DEFAULTS = {"level": logging.INFO,
"format": "%(asctime)s [%(levelname)s]: %(message)s"}
def setup_logging(**kwargs):
"""Convenience function for setting up some sane logging defaults"""
    opts = dict(LOGGING_DEFAULTS, **kwargs)
logging.basicConfig(**opts)
| 36.123894 | 82 | 0.607055 |
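setup_logging above layers caller-supplied keyword arguments over LOGGING_DEFAULTS and forwards the merged options to logging.basicConfig. A small usage sketch under that assumption (the import path and logger name below are illustrative, not part of storelet):

import logging
from storelet import setup_logging  # assumes storelet.py is importable

# Caller overrides win over LOGGING_DEFAULTS; here DEBUG replaces the INFO default.
setup_logging(level=logging.DEBUG)
logging.getLogger("storelet.example").debug("backup started")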
078eac42052a5c2213460643ce82f3d54d3402ee
| 963 |
py
|
Python
|
apps/delivery/migrations/0001_initial.py
|
jimforit/lagou
|
165593a15597012092b5e0ba34158fbc1d1c213d
|
[
"MIT"
] | 2 |
2019-03-11T03:58:19.000Z
|
2020-03-06T06:45:28.000Z
|
apps/delivery/migrations/0001_initial.py
|
jimforit/lagou
|
165593a15597012092b5e0ba34158fbc1d1c213d
|
[
"MIT"
] | 5 |
2020-06-05T20:04:20.000Z
|
2021-09-08T00:53:52.000Z
|
apps/delivery/migrations/0001_initial.py
|
jimforit/lagou
|
165593a15597012092b5e0ba34158fbc1d1c213d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.2 on 2019-03-08 13:03
from django.db import migrations, models
| 33.206897 | 157 | 0.559709 |
078f93641653dd1d0323ab98d96a9fc89761a30c
| 721 |
py
|
Python
|
elementary/date-and-time-convertor.py
|
vargad/exercises
|
1a2fc2557672749d590ebdf596f99f53405320a1
|
[
"MIT"
] | 1 |
2018-02-24T10:51:07.000Z
|
2018-02-24T10:51:07.000Z
|
elementary/date-and-time-convertor.py
|
vargad/exercises
|
1a2fc2557672749d590ebdf596f99f53405320a1
|
[
"MIT"
] | null | null | null |
elementary/date-and-time-convertor.py
|
vargad/exercises
|
1a2fc2557672749d590ebdf596f99f53405320a1
|
[
"MIT"
] | 1 |
2019-02-13T21:41:07.000Z
|
2019-02-13T21:41:07.000Z
|
#!/usr/bin/env python3
if __name__ == '__main__':
print(date_time("01.01.2018 00:00"))
assert date_time("01.01.2018 00:00") == "1 January 2018 year 0 hours 0 minutes"
assert date_time("04.08.1984 08:15") == "4 August 1984 year 8 hours 15 minutes"
assert date_time("17.12.1990 07:42") == "17 December 1990 year 7 hours 42 minutes"
| 51.5 | 153 | 0.62552 |
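The asserts above fully specify the conversion from "dd.mm.yyyy HH:MM" strings to a verbose English form, but the date_time implementation itself is not part of this excerpt. A minimal sketch that satisfies exactly those three asserts, assuming datetime.strptime parsing and an English locale for month names, could look like:

from datetime import datetime

def date_time(text):
    # Parse "dd.mm.yyyy HH:MM" and spell it out in words.
    dt = datetime.strptime(text, "%d.%m.%Y %H:%M")
    return (f"{dt.day} {dt.strftime('%B')} {dt.year} year "
            f"{dt.hour} hours {dt.minute} minutes")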
078fda9e491f07fc6685fbdf4be7377dd5f3e4a3
| 10,361 |
py
|
Python
|
lbry/wallet/server/peer.py
|
snapperVibes/lbry-sdk
|
77a51d1ad43404e5dc52af715a7bebfaeb3fee16
|
[
"MIT"
] | 2 |
2021-04-14T07:37:37.000Z
|
2021-05-18T13:20:11.000Z
|
lbry/wallet/server/peer.py
|
lucianolb76/lbry-sdk
|
0c09f24cbf5bd0959dedca63363ff7ffadd45d66
|
[
"MIT"
] | null | null | null |
lbry/wallet/server/peer.py
|
lucianolb76/lbry-sdk
|
0c09f24cbf5bd0959dedca63363ff7ffadd45d66
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Representation of a peer server."""
from ipaddress import ip_address
from lbry.wallet.server import util
from lbry.wallet.server.util import cachedproperty
from typing import Dict
def update_features(self, features):
"""Update features in-place."""
try:
tmp = Peer(self.host, features)
except Exception:
pass
else:
self.update_features_from_peer(tmp)
def connection_port_pairs(self):
"""Return a list of (kind, port) pairs to try when making a
connection."""
# Use a list not a set - it's important to try the registered
# ports first.
pairs = [('SSL', self.ssl_port), ('TCP', self.tcp_port)]
while self.other_port_pairs:
pairs.append(self.other_port_pairs.pop())
return [pair for pair in pairs if pair[1]]
def mark_bad(self):
"""Mark as bad to avoid reconnects but also to remember for a
while."""
self.bad = True
def check_ports(self, other):
"""Remember differing ports in case server operator changed them
or removed one."""
if other.ssl_port != self.ssl_port:
self.other_port_pairs.add(('SSL', other.ssl_port))
if other.tcp_port != self.tcp_port:
self.other_port_pairs.add(('TCP', other.tcp_port))
return bool(self.other_port_pairs)
def bucket(self):
if self.is_tor:
return 'onion'
if not self.ip_addr:
return ''
return tuple(self.ip_addr.split('.')[:2])
def serialize(self):
"""Serialize to a dictionary."""
return {attr: getattr(self, attr) for attr in self.ATTRS}
def _port(self, key):
hosts = self.features.get('hosts')
if isinstance(hosts, dict):
host = hosts.get(self.host)
port = self._integer(key, host)
if port and 0 < port < 65536:
return port
return None
def _integer(self, key, d=None):
d = d or self.features
result = d.get(key) if isinstance(d, dict) else None
if isinstance(result, str):
try:
result = int(result)
except ValueError:
pass
return result if isinstance(result, int) else None
def _string(self, key):
result = self.features.get(key)
return result if isinstance(result, str) else None
def _protocol_version_string(self, key):
version_str = self.features.get(key)
ptuple = util.protocol_tuple(version_str)
return util.version_string(ptuple)
def to_tuple(self):
"""The tuple ((ip, host, details) expected in response
to a peers subscription."""
details = self.real_name().split()[1:]
return (self.ip_addr or self.host, self.host, details)
def real_name(self):
"""Real name of this peer as used on IRC."""
parts = [self.host, 'v' + self.protocol_max]
if self.pruning:
parts.append(f'p{self.pruning:d}')
for letter, port in (('s', self.ssl_port), ('t', self.tcp_port)):
if port:
parts.append(port_text(letter, port))
return ' '.join(parts)
| 34.194719 | 78 | 0.599266 |
078fed475eb9a6e7954859be5858da011d4c522e
| 1,787 |
py
|
Python
|
tests/components/deconz/test_diagnostics.py
|
aomann/core
|
5e71e7b775461cd4849c36075c6a1459a7d0ad22
|
[
"Apache-2.0"
] | null | null | null |
tests/components/deconz/test_diagnostics.py
|
aomann/core
|
5e71e7b775461cd4849c36075c6a1459a7d0ad22
|
[
"Apache-2.0"
] | 24 |
2021-11-03T06:20:16.000Z
|
2022-03-31T06:23:17.000Z
|
tests/components/deconz/test_diagnostics.py
|
aomann/core
|
5e71e7b775461cd4849c36075c6a1459a7d0ad22
|
[
"Apache-2.0"
] | null | null | null |
"""Test deCONZ diagnostics."""
from unittest.mock import patch
from pydeconz.websocket import STATE_RUNNING
from homeassistant.const import Platform
from .test_gateway import DECONZ_CONFIG, setup_deconz_integration
from tests.components.diagnostics import get_diagnostics_for_config_entry
| 32.490909 | 73 | 0.582541 |
079020088d5707b9fc6c67fde0c7358e446490f2
| 32,821 |
py
|
Python
|
jax_md/partition.py
|
l1zp/jax-md
|
2440794aebb1c77116459e2ec2051d537a94ecf4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax_md/partition.py
|
l1zp/jax-md
|
2440794aebb1c77116459e2ec2051d537a94ecf4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax_md/partition.py
|
l1zp/jax-md
|
2440794aebb1c77116459e2ec2051d537a94ecf4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to transform functions on individual tuples of particles to sets."""
from absl import logging
from functools import reduce, partial
from collections import namedtuple
from enum import Enum
from typing import Any, Callable, Optional, Dict, Tuple, Generator, Union
import math
from operator import mul
import numpy as onp
from jax import lax
from jax import ops
from jax import jit, vmap, eval_shape
from jax.abstract_arrays import ShapedArray
from jax.interpreters import partial_eval as pe
import jax.numpy as jnp
from jax_md import quantity, space, dataclasses, util
import jraph
# Types
Array = util.Array
f32 = util.f32
f64 = util.f64
i32 = util.i32
i64 = util.i64
Box = space.Box
DisplacementOrMetricFn = space.DisplacementOrMetricFn
MetricFn = space.MetricFn
# Cell List
def _cell_dimensions(spatial_dimension: int,
box_size: Box,
minimum_cell_size: float) -> Tuple[Box, Array, Array, int]:
"""Compute the number of cells-per-side and total number of cells in a box."""
if isinstance(box_size, int) or isinstance(box_size, float):
box_size = float(box_size)
# NOTE(schsam): Should we auto-cast based on box_size? I can't imagine a case
# in which the box_size would not be accurately represented by an f32.
if (isinstance(box_size, onp.ndarray) and
(box_size.dtype == jnp.int32 or box_size.dtype == jnp.int64)):
box_size = float(box_size)
cells_per_side = onp.floor(box_size / minimum_cell_size)
cell_size = box_size / cells_per_side
cells_per_side = onp.array(cells_per_side, dtype=jnp.int64)
if isinstance(box_size, onp.ndarray):
if box_size.ndim == 1 or box_size.ndim == 2:
assert box_size.size == spatial_dimension
flat_cells_per_side = onp.reshape(cells_per_side, (-1,))
for cells in flat_cells_per_side:
if cells < 3:
raise ValueError(
('Box must be at least 3x the size of the grid spacing in each '
'dimension.'))
cell_count = reduce(mul, flat_cells_per_side, 1)
elif box_size.ndim == 0:
cell_count = cells_per_side ** spatial_dimension
else:
raise ValueError('Box must either be a scalar or a vector.')
else:
cell_count = cells_per_side ** spatial_dimension
return box_size, cell_size, cells_per_side, int(cell_count)
def count_cell_filling(R: Array,
box_size: Box,
minimum_cell_size: float) -> Array:
"""Counts the number of particles per-cell in a spatial partition."""
dim = int(R.shape[1])
box_size, cell_size, cells_per_side, cell_count = \
_cell_dimensions(dim, box_size, minimum_cell_size)
hash_multipliers = _compute_hash_constants(dim, cells_per_side)
particle_index = jnp.array(R / cell_size, dtype=jnp.int64)
particle_hash = jnp.sum(particle_index * hash_multipliers, axis=1)
filling = ops.segment_sum(jnp.ones_like(particle_hash),
particle_hash,
cell_count)
return filling
def cell_list(box_size: Box,
minimum_cell_size: float,
cell_capacity_or_example_R: Union[int, Array],
buffer_size_multiplier: float=1.1
) -> Callable[[Array], CellList]:
r"""Returns a function that partitions point data spatially.
Given a set of points {x_i \in R^d} with associated data {k_i \in R^m} it is
often useful to partition the points / data spatially. A simple partitioning
that can be implemented efficiently within XLA is a dense partition into a
uniform grid called a cell list.
Since XLA requires that shapes be statically specified, we allocate fixed
sized buffers for each cell. The size of this buffer can either be specified
manually or it can be estimated automatically from a set of positions. Note,
  if the distribution of points changes significantly it is likely the buffer
  sizes will have to be adjusted.
This partitioning will likely form the groundwork for parallelizing
simulations over different accelerators.
Args:
box_size: A float or an ndarray of shape [spatial_dimension] specifying the
size of the system. Note, this code is written for the case where the
boundaries are periodic. If this is not the case, then the current code
will be slightly less efficient.
minimum_cell_size: A float specifying the minimum side length of each cell.
Cells are enlarged so that they exactly fill the box.
    cell_capacity_or_example_R: Either an integer specifying the maximum
number of particles that can be stored in each cell or an ndarray of
positions of shape [particle_count, spatial_dimension] that is used to
estimate the cell_capacity.
buffer_size_multiplier: A floating point multiplier that multiplies the
estimated cell capacity to allow for fluctuations in the maximum cell
occupancy.
Returns:
A function `cell_list_fn(R, **kwargs)` that partitions positions, `R`, and
side data specified by kwargs into a cell list. Returns a CellList
containing the partition.
"""
if util.is_array(box_size):
box_size = onp.array(box_size)
if len(box_size.shape) == 1:
box_size = jnp.reshape(box_size, (1, -1))
if util.is_array(minimum_cell_size):
minimum_cell_size = onp.array(minimum_cell_size)
cell_capacity = cell_capacity_or_example_R
if _is_variable_compatible_with_positions(cell_capacity):
cell_capacity = _estimate_cell_capacity(
cell_capacity, box_size, minimum_cell_size, buffer_size_multiplier)
elif not isinstance(cell_capacity, int):
msg = (
'cell_capacity_or_example_positions must either be an integer '
'specifying the cell capacity or a set of positions that will be used '
'to estimate a cell capacity. Found {}.'.format(type(cell_capacity))
)
raise ValueError(msg)
return build_cells
def _displacement_or_metric_to_metric_sq(
displacement_or_metric: DisplacementOrMetricFn) -> MetricFn:
"""Checks whether or not a displacement or metric was provided."""
for dim in range(1, 4):
try:
R = ShapedArray((dim,), f32)
dR_or_dr = eval_shape(displacement_or_metric, R, R, t=0)
if len(dR_or_dr.shape) == 0:
return lambda Ra, Rb, **kwargs: \
displacement_or_metric(Ra, Rb, **kwargs) ** 2
else:
return lambda Ra, Rb, **kwargs: space.square_distance(
displacement_or_metric(Ra, Rb, **kwargs))
except TypeError:
continue
except ValueError:
continue
raise ValueError(
    'Canonicalize displacement not implemented for spatial dimension larger '
'than 4.')
NeighborFn = Callable[[Array, Optional[NeighborList], Optional[int]],
NeighborList]
return NeighborListFns(lambda R, extra_capacity=0, **kwargs:
neighbor_list_fn(R,
extra_capacity=extra_capacity,
**kwargs),
lambda R, nbrs, **kwargs: # pytype: disable=wrong-arg-count
neighbor_list_fn(R, nbrs, **kwargs))
def neighbor_list_mask(neighbor: NeighborList, mask_self: bool=False) -> Array:
"""Compute a mask for neighbor list."""
if is_sparse(neighbor.format):
mask = neighbor.idx[0] < len(neighbor.reference_position)
if mask_self:
mask = mask & (neighbor.idx[0] != neighbor.idx[1])
return mask
mask = neighbor.idx < len(neighbor.idx)
if mask_self:
N = len(neighbor.reference_position)
self_mask = neighbor.idx != jnp.reshape(jnp.arange(N), (N, 1))
mask = mask & self_mask
return mask
def to_jraph(neighbor: NeighborList, mask: Array=None) -> jraph.GraphsTuple:
"""Convert a sparse neighbor list to a `jraph.GraphsTuple`.
  As in jraph, padding here is accomplished by adding a fictitious graph with a
single node.
Args:
neighbor: A neighbor list that we will convert to the jraph format. Must be
sparse.
mask: An optional mask on the edges.
Returns:
A `jraph.GraphsTuple` that contains the topology of the neighbor list.
"""
if not is_sparse(neighbor.format):
raise ValueError('Cannot convert a dense neighbor list to jraph format. '
'Please use either NeighborListFormat.Sparse or '
'NeighborListFormat.OrderedSparse.')
receivers, senders = neighbor.idx
N = len(neighbor.reference_position)
_mask = neighbor_list_mask(neighbor)
if mask is not None:
_mask = _mask & mask
cumsum = jnp.cumsum(_mask)
index = jnp.where(_mask, cumsum - 1, len(receivers))
ordered = N * jnp.ones((len(receivers) + 1,), jnp.int32)
receivers = ordered.at[index].set(receivers)[:-1]
senders = ordered.at[index].set(senders)[:-1]
mask = receivers < N
return jraph.GraphsTuple(
nodes=None,
edges=None,
receivers=receivers,
senders=senders,
globals=None,
n_node=jnp.array([N, 1]),
n_edge=jnp.array([jnp.sum(_mask), jnp.sum(~_mask)]),
)
def to_dense(neighbor: NeighborList) -> Array:
"""Converts a sparse neighbor list to dense ids. Cannot be JIT."""
if neighbor.format is not Sparse:
raise ValueError('Can only convert sparse neighbor lists to dense ones.')
receivers, senders = neighbor.idx
mask = neighbor_list_mask(neighbor)
receivers = receivers[mask]
senders = senders[mask]
N = len(neighbor.reference_position)
count = ops.segment_sum(jnp.ones(len(receivers), jnp.int32), receivers, N)
max_count = jnp.max(count)
offset = jnp.tile(jnp.arange(max_count), N)[:len(senders)]
hashes = senders * max_count + offset
dense_idx = N * jnp.ones((N * max_count,), jnp.int32)
dense_idx = dense_idx.at[hashes].set(receivers).reshape((N, max_count))
return dense_idx
Dense = NeighborListFormat.Dense
Sparse = NeighborListFormat.Sparse
OrderedSparse = NeighborListFormat.OrderedSparse
| 38.34229 | 85 | 0.681728 |
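The cell_list docstring above describes bucketing points into a uniform grid so that neighbour queries only need to inspect nearby cells. A NumPy-only sketch of that bucketing idea, independent of JAX and of the actual jax_md API (function and variable names here are illustrative):

import numpy as np

def bucket_points(R, box_size, cell_size):
    # Assign each point to an integer cell id on a uniform grid (periodic box assumed).
    cells_per_side = int(np.floor(box_size / cell_size))
    idx = np.floor(R / (box_size / cells_per_side)).astype(int) % cells_per_side
    # Hash the per-dimension indices (ix, iy, ...) into a single cell id.
    multipliers = cells_per_side ** np.arange(R.shape[1])
    return idx @ multipliers

R = np.random.uniform(0.0, 10.0, size=(100, 2))
cell_ids = bucket_points(R, box_size=10.0, cell_size=2.5)
# Points that share a cell id are spatial neighbours at the grid resolution.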
0790c876aeebfd734072b2709676f53a6053af06
| 964 |
py
|
Python
|
rhucrl_experiments/evaluate/launch_evaluate_mass.py
|
sebascuri/rhucrl
|
27663e1302f3bbc636dff28495c6f2667bb7c1da
|
[
"MIT"
] | 1 |
2021-11-19T11:46:48.000Z
|
2021-11-19T11:46:48.000Z
|
rhucrl_experiments/evaluate/launch_evaluate_mass.py
|
sebascuri/rhucrl
|
27663e1302f3bbc636dff28495c6f2667bb7c1da
|
[
"MIT"
] | 1 |
2021-11-22T07:48:03.000Z
|
2021-11-22T07:48:03.000Z
|
rhucrl_experiments/evaluate/launch_evaluate_mass.py
|
sebascuri/rhucrl
|
27663e1302f3bbc636dff28495c6f2667bb7c1da
|
[
"MIT"
] | 1 |
2022-03-26T10:18:01.000Z
|
2022-03-26T10:18:01.000Z
|
"""Run from rhucrl_experiments.evaluate folder."""
import socket
from lsf_runner import init_runner, make_commands
from rhucrl_experiments.evaluate.utilities import ENVIRONMENTS
RARL_DIR = "../../runs/RARLAgent"
ZERO_SUM_DIR = "../../runs/ZeroSumAgent"
SCRIPT = "evaluate_mass_change.py"
EXPERIMENTS = {
"supermodularity": {"algorithm": "RARL_MF", "base-dir": RARL_DIR},
"shallow": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR},
"greedy": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR},
"lazy": {"algorithm": "HUCRL", "base-dir": RARL_DIR},
}.get(socket.gethostname(), {"algorithm": "RARL", "base-dir": RARL_DIR})
runner = init_runner("EvaluateMassChange.", num_threads=4)
for seed in [0, 1, 2, 3, 4]:
base_args = {"num-runs": 10, "seed": seed}
base_args.update(**EXPERIMENTS)
commands = make_commands(
SCRIPT, base_args=base_args, common_hyper_args={"environment": ENVIRONMENTS}
)
runner.run_batch(commands)
| 37.076923 | 84 | 0.693983 |