| column | type | values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 to 972 |
| max_stars_repo_name | stringlengths | 6 to 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 to 972 |
| max_issues_repo_name | stringlengths | 6 to 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 to 972 |
| max_forks_repo_name | stringlengths | 6 to 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

Each record below lists these fields in order, pipe-separated, followed by the file content and its avg_line_length | max_line_length | alphanum_fraction statistics.
a81b2379c348bbf473cf4eb8724fdc28b37848dd | 123 | py | Python | tests/context.py | astercrono/python-datareport | 4162ecebea27645559161f46b24fd46917827bbd | ["BSD-3-Clause"] | null | null | null | tests/context.py | astercrono/python-datareport | 4162ecebea27645559161f46b24fd46917827bbd | ["BSD-3-Clause"] | null | null | null | tests/context.py | astercrono/python-datareport | 4162ecebea27645559161f46b24fd46917827bbd | ["BSD-3-Clause"] | 1 | 2019-02-05T20:40:29.000Z | 2019-02-05T20:40:29.000Z |
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import datareport
| 20.5 | 82 | 0.739837 |
0e207b8147b3af628969cdd197abf18cbd7316e9 | 3,675 | py | Python | scripts3/crypto.py | AlexOConnorHub/GitCTF | 406fd761961019c74b9f9b714eef0b2044a1e5fc | ["Apache-2.0"] | null | null | null | scripts3/crypto.py | AlexOConnorHub/GitCTF | 406fd761961019c74b9f9b714eef0b2044a1e5fc | ["Apache-2.0"] | null | null | null | scripts3/crypto.py | AlexOConnorHub/GitCTF | 406fd761961019c74b9f9b714eef0b2044a1e5fc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
###############################################################################
# Git-based CTF
###############################################################################
#
# Author: SeongIl Wi <[email protected]>
# Jaeseung Choi <[email protected]>
# Sang Kil Cha <[email protected]>
#
# Copyright (c) 2018 SoftSec Lab. KAIST
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import json
import subprocess
import re
import shutil
import zipfile
from ctf_utils import random_string, rmdir, rmfile, remove_trailing_slash
from command import run_command
def decrypt_exploit(encrypted_exploit_path, config, team, out_dir=None, \
expected_signer=None):
if out_dir is None:
out_dir = "exploit"
rmdir(out_dir)
tmpzip = f"/tmp/gitctf_{random_string(6)}.zip"
tmpdir = f"/tmp/gitctf_{random_string(6)}"
tmpgpg = f"/tmp/gitctf_{random_string(6)}.gpg"
if expected_signer == None:
decrypt_cmd = f'gpg -o {tmpzip} {encrypted_exploit_path}'
else:
instructor_id = config['teams']['instructor']['pub_key_id']
team_id = config['teams'][team]['pub_key_id']
expected_signer_id = config['individual'][expected_signer]['pub_key_id']
# Make keyring
run_command(f"gpg -o {tmpgpg} --export {expected_signer_id} {instructor_id} \
{team_id}", os.getcwd())
decrypt_cmd = f"gpg --no-default-keyring --keyring {tmpgpg} -o {tmpzip} {encrypted_exploit_path}"
_, err, r = run_command(decrypt_cmd, os.getcwd())
if r != 0:
print(f"[*] Failed to decrypt/verify {encrypted_exploit_path}")
print(err)
return None
run_command(f'unzip {tmpzip} -d {tmpdir}', os.getcwd())
shutil.move(tmpdir, out_dir)
rmfile(tmpzip)
rmfile(tmpgpg)
rmdir(tmpdir)
return out_dir
def encrypt_exploit(exploit_dir, target_team, config, signer=None):
# Remove trailing slash, for user convenience
exploit_dir = remove_trailing_slash(exploit_dir)
out_file = exploit_dir + ".zip.pgp"
# Retrieve information from config
teams = config["teams"]
instructor_pubkey = teams["instructor"]["pub_key_id"]
target_pubkey = teams[target_team]['pub_key_id']
# Zip the directory
tmp_path = f"/tmp/gitctf_{random_string(6)}"
shutil.make_archive(tmp_path, "zip", exploit_dir)
zip_file = tmp_path + ".zip" # make_archive() automatically appends suffix.
# Encrypt the zipped file
encrypt_cmd = f"gpg -o {out_file} "
if signer is not None:
signer_pubkey = config["individual"][signer]['pub_key_id']
encrypt_cmd += f"--default-key {signer_pubkey} --sign "
encrypt_cmd += f"-e -r {target_pubkey} -r {instructor_pubkey} "
encrypt_cmd += f"--armor {zip_file}"
_, err, ret = run_command(encrypt_cmd, None)
rmfile(zip_file) # Clean up zip file.
if ret != 0:
print(f"[*] Failed to sign/encrypt {zip_file}")
print(err)
return None
return out_file
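# Illustrative usage sketch (not part of the original script; the directory
# name, team name, and signer below are placeholders):
#
#   out = encrypt_exploit("exploit-team2", "team2", config, signer="alice")
#   # returns "exploit-team2.zip.pgp" on success, or None if gpg fails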
# TODO : maybe we can add main function so this can be used like
# "python crypto.py ENCRYPT ..." or "python crypto.py DECRYPT ..."
| 34.027778 | 105 | 0.651973 |
37b28651b2b161393e5a235b04bbe2bf84d8cacb | 1,560 | py | Python | discord/types/sticker.py | alexyy802/GlowCord | af92f1a11843157aa5484c1781417456175a8ab3 | ["MIT"] | null | null | null | discord/types/sticker.py | alexyy802/GlowCord | af92f1a11843157aa5484c1781417456175a8ab3 | ["MIT"] | null | null | null | discord/types/sticker.py | alexyy802/GlowCord | af92f1a11843157aa5484c1781417456175a8ab3 | ["MIT"] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present tag-epic
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------
Aliased module. See the same file in the glowcord folder for more information
Autogenerated by aliasgen.py
"""
from glowcord.types.sticker import BaseSticker, CreateGuildSticker, EditGuildSticker, GuildSticker, List, ListPremiumStickerPacks, Literal, Snowflake, StandardSticker, Sticker, StickerFormatType, StickerItem, StickerPack, TypedDict, Union, User, _CreateGuildStickerOptional, _GuildStickerOptional, annotations
| 50.322581 | 309 | 0.804487 |
eef38bb24d449850a71010179e9a82eeea12c951 | 929 | py | Python | mark_cities.py | omarTronto/best-flight-algo | 4db60f6a95add6765c0c8eb6e44c2fb9fe6c598b | ["Apache-2.0"] | 1 | 2019-07-11T07:38:19.000Z | 2019-07-11T07:38:19.000Z | mark_cities.py | omarTronto/best-flight-algo | 4db60f6a95add6765c0c8eb6e44c2fb9fe6c598b | ["Apache-2.0"] | null | null | null | mark_cities.py | omarTronto/best-flight-algo | 4db60f6a95add6765c0c8eb6e44c2fb9fe6c598b | ["Apache-2.0"] | 1 | 2019-11-06T22:09:23.000Z | 2019-11-06T22:09:23.000Z |
import gmplot
import geocoder
def plot_map_with_marked_cities (cities_list, output_path, center_lat =0, center_long=0, zoom=2):
cities_lats_dict = {}
cities_longs_dict = {}
for each_city in cities_list:
g = geocoder.google(each_city, key="AIzaSyDwDkLHO-xUfosP6CeNGmJwQhPiTK6qyiU")
latlng = g.latlng
cities_lats_dict [each_city] = latlng[0]
cities_longs_dict [each_city] = latlng[1]
lats_tupl = tuple(cities_lats_dict.values())
longs_tupl = tuple(cities_longs_dict.values())
gmap = gmplot.GoogleMapPlotter(center_lat, center_long, zoom)
gmap.scatter(lats_tupl, longs_tupl, color= 'red',size = 100000, marker = False)
gmap.apikey = "AIzaSyDwDkLHO-xUfosP6CeNGmJwQhPiTK6qyiU"
gmap.draw(output_path)
"""list1 = ["Tokyo","New Delhi","Kuala Lumpur","Dubai","Ankara","Beijing","Seoul","Taipei","Singapore","Islamabad"]
plot_map_with_marked_cities(list1, "C:\\Users\\aminq\\Desktop\\maps\\mapx.html")"""
| 35.730769 | 115 | 0.749193 |
abd147431caba36879ff06af0775fac1e687b313 | 541 | py | Python | peptide_generation/immunopeptide/util_misc.py | yjzhang2013/pancanatlas_code_public | cd095a7ac1f9e8124a6849be5aa9a4ccf932c31b | ["MIT"] | 32 | 2018-08-04T12:34:47.000Z | 2022-03-21T16:16:49.000Z | peptide_generation/immunopeptide/util_misc.py | yjzhang2013/pancanatlas_code_public | cd095a7ac1f9e8124a6849be5aa9a4ccf932c31b | ["MIT"] | 6 | 2019-02-19T19:24:24.000Z | 2022-03-01T06:47:11.000Z | peptide_generation/immunopeptide/util_misc.py | ratschlab/pancanatlas_code_public | c50e29440ab4c0d772b3585d43e4668190ec4ae9 | ["MIT"] | 16 | 2018-08-22T11:06:23.000Z | 2021-11-01T05:05:30.000Z |
''' Misc. utility functions'''
# Returns header labels corresponding to donor_id and mutation_mode
def header_labels(donor_id, mutation_mode):
    if mutation_mode is None:
        mutation_type = "REFERENCE"
    elif mutation_mode == "both":
        mutation_type = "GERM_SOMATIC"
    elif mutation_mode == "somatic_only":
        mutation_type = "SOMATIC"
    elif mutation_mode == "germline_only":
        mutation_type = "GERM"
    peptide_type = "REFERENCE" if donor_id == "ref" else donor_id
    return (peptide_type, mutation_type)
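# Illustrative examples (not in the original file; the donor id is a placeholder):
#   header_labels("ref", None)                   -> ("REFERENCE", "REFERENCE")
#   header_labels("TCGA-XX-1234", "somatic_only") -> ("TCGA-XX-1234", "SOMATIC")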
| 33.8125 | 67 | 0.696858 |
afe31e595bcf6c48ec71a834d4a9a506fd6eb2ec | 323 | py | Python | AlexaSongBot/__init__.py | zooloozoo/AlexaSongBot | 2588ff38cb49cb1feaf0f0a1e06d280999bd46cb | ["MIT"] | null | null | null | AlexaSongBot/__init__.py | zooloozoo/AlexaSongBot | 2588ff38cb49cb1feaf0f0a1e06d280999bd46cb | ["MIT"] | null | null | null | AlexaSongBot/__init__.py | zooloozoo/AlexaSongBot | 2588ff38cb49cb1feaf0f0a1e06d280999bd46cb | ["MIT"] | null | null | null |
import logging
from pyrogram import Client
from config import API_HASH, API_ID, BOT_TOKEN
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
LOGGER = logging.getLogger(__name__)
app = Client("AlexaSongBot", bot_token=BOT_TOKEN, api_hash=API_HASH, api_id=API_ID)
| 26.916667 | 85 | 0.755418 |
d881693b1b2f4a37334d9704e52274f5b5f76dc6 | 4,015 | py | Python | bib2xml.py | Devsart/bib2xml | 4e42aee42dc4cfe03b8d5b07e1291d148b288c41 | ["MIT"] | 2 | 2021-12-22T14:07:27.000Z | 2022-01-17T17:03:33.000Z | bib2xml.py | Devsart/bib2xml | 4e42aee42dc4cfe03b8d5b07e1291d148b288c41 | ["MIT"] | null | null | null | bib2xml.py | Devsart/bib2xml | 4e42aee42dc4cfe03b8d5b07e1291d148b288c41 | ["MIT"] | null | null | null |
#!/usr/bin/python
from optparse import OptionParser
from pybtex.database.input import bibtex # https://github.com/chbrown/pybtex
import sys
import xml.etree.cElementTree as ET
import pybtext_conversion_helper
import codecs
parser = OptionParser()
parser.add_option('-a', '--append', dest='inxml', action='store',
help='existing filename (e.g. Sources.xml) to append elements to')
parser.add_option('-d', '--debug', dest='debug', action='store_true',
default=False,
help='debug (useful for broken .bib entries)')
parser.add_option('-i', '--input', dest='bibtexfile', type='string',
help='input bibtex filename', action='store')
parser.add_option('-o', '--output', dest='xmlfile', type='string',
default=sys.stdout,
help='output filename', action='store')
(options, args) = parser.parse_args()
parser = bibtex.Parser()
try:
bibdata = parser.parse_file(options.bibtexfile)
except NameError:
print('Need an input filename. See --help', file=sys.stderr)
sys.exit(1)
if len(args) > 0:
print('Warning: extra arguments ignored: %s' % ' '.join(args), file=sys.stderr)
try:
ET.register_namespace('', "http://schemas.microsoft.com/office/word/2004/10/bibliography")
ET.register_namespace('b', "http://schemas.microsoft.com/office/word/2004/10/bibliography")
root = ET.parse(options.inxml).getroot()
except TypeError:
root = ET.Element('b:Sources', {'xmlns:b': "http://schemas.microsoft.com/office/word/2004/10/bibliography"})
for key, entry in bibdata.entries.items():
if options.debug:
print(key)
source = ET.SubElement(root, 'b:Source')
tag = ET.SubElement(source, 'b:Tag')
tag.text = key
b = bibdata.entries[key].fields
srctypes = {'book': 'Book',
'article': 'ArticleInAPeriodical',
'incollection': 'ArticleInAPeriodical',
'inproceedings': 'ConferenceProceedings',
'misc': 'Misc',
'phdthesis': 'Report',
'techreport': 'Report'}
try:
srctype = ET.SubElement(source, 'b:SourceType')
srctype.text = srctypes.get(entry.type)
except KeyError:
source.remove(srctype)
def add_element(source, tagname, keyname):
try:
tag = ET.SubElement(source, tagname)
tag.text = b[keyname]
except KeyError:
source.remove(tag)
return source
# mapping of MSFT tag to Bibtex field names
xlate = (('b:Title', 'title'), ('b:Year', 'year'), ('b:City', 'city'),
('b:Publisher', 'publisher'), ('b:ConferenceName', 'organization'),
('b:URL', 'url'), ('b:BookTitle', 'booktitle'), ('b:ChapterNumber', 'chapter'),
('b:Edition', 'edition'), ('b:Institution', 'institution'), ('b:JournalName', 'journal'),
('b:Month', 'month'), ('b:Volume', 'number'), ('b:Pages', 'pages'),
('b:Type', 'type'), ('b:URL', 'howpublished'))
for msft, bibtex in xlate:
source = add_element(source, msft, bibtex)
authors0 = ET.SubElement(source, 'b:Author')
authors1 = ET.SubElement(authors0, 'b:Author')
namelist = ET.SubElement(authors1, 'b:NameList')
for author in bibdata.entries[key].persons["author"]:
person = ET.SubElement(namelist, 'b:Person')
first = ET.SubElement(person, 'b:First')
try: first.text = author.first_names[0]
except IndexError:
first.text = ''
last = ET.SubElement(person, 'b:Last')
last.text = author.last_names[0]
# hack, unable to get register_namespace to work right when parsing the doc
output = ET.tostring(root)
output2 = pybtext_conversion_helper.convert(output)
##xml_file = ET.fromstring(output2)
##tree = ET.ElementTree(xml_file)
##tree.write("xml_output.xml")
try:
with open(options.xmlfile, 'wb') as f:
f.write(output2.encode('utf-8')[2:-1])
except TypeError:
print(output2)
| 38.605769 | 114 | 0.617933 |
539c36cc02e3951cfac09722ca23f15f4d761ce6 | 2,051 | py | Python | {{cookiecutter.project_name}}/docs/conf.py | kelockhart/acutter | 3f7fc9bece1554d1637e147608a4d7eb567aa036 | ["MIT"] | null | null | null | {{cookiecutter.project_name}}/docs/conf.py | kelockhart/acutter | 3f7fc9bece1554d1637e147608a4d7eb567aa036 | ["MIT"] | null | null | null | {{cookiecutter.project_name}}/docs/conf.py | kelockhart/acutter | 3f7fc9bece1554d1637e147608a4d7eb567aa036 | ["MIT"] | 2 | 2022-02-09T15:09:52.000Z | 2022-03-16T20:03:05.000Z |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# if I ever want to make it nicer: https://github.com/construct/construct/tree/master/docs
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "{{ cookiecutter.project_name }}"
copyright = "2020, {{ cookiecutter.full_name }}"
author = "{{ cookiecutter.full_name }}"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_parser",
]
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "haiku"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 35.362069 | 90 | 0.664554 |
1f57d938a12d9c1ca631ee1d25286d09af4a38fe | 394 | py | Python | Dataset/Leetcode/train/20/572.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | ["MIT"] | null | null | null | Dataset/Leetcode/train/20/572.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | ["MIT"] | null | null | null | Dataset/Leetcode/train/20/572.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | ["MIT"] | null | null | null |
class Solution:
    def XXX(self, s: str) -> bool:
        d = {"{": "}", "(": ")", "[": "]"}
        t = []
        for i in s:
            if i in d:
                t.append(i)
            else:
                if len(t) != 0 and d[t[-1]] == i:
                    t.pop()
                else:
                    return False
        if len(t) == 0:
            return True
        return False
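# Illustrative check (not in the original file): the method tests whether a
# bracket string is balanced.
#   Solution().XXX("()[]{}")  -> True
#   Solution().XXX("(]")      -> False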
| 23.176471 | 47 | 0.30203 |
d00f2bac4f575af360e100733c81d34caa20dc8b | 5,239 | py | Python | mne/commands/mne_coreg.py | blakecaldwell/mne-python | 9b5ae5104ecdeaea13a88273c712382bf131162c | ["BSD-3-Clause"] | 1 | 2020-07-28T16:09:54.000Z | 2020-07-28T16:09:54.000Z | mne/commands/mne_coreg.py | gkmaro634/mne-python | 5409a89233b764f3f3f3136cf9bf6b8d5fb0a4fe | ["BSD-3-Clause"] | 1 | 2019-08-16T13:59:53.000Z | 2019-08-19T16:37:35.000Z | mne/commands/mne_coreg.py | gkmaro634/mne-python | 5409a89233b764f3f3f3136cf9bf6b8d5fb0a4fe | ["BSD-3-Clause"] | 1 | 2019-12-10T02:59:18.000Z | 2019-12-10T02:59:18.000Z |
#!/usr/bin/env python
# Authors: Christian Brodbeck <[email protected]>
"""Open the coregistration GUI.
Examples
--------
.. code-block:: console
$ mne coreg
"""
import os.path as op
import sys
import mne
from mne.utils import ETSContext
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
default=None, help="Subjects directory")
parser.add_option("-s", "--subject", dest="subject", default=None,
help="Subject name")
parser.add_option("-f", "--fiff", dest="inst", default=None,
help="FIFF file with digitizer data for coregistration")
parser.add_option("-t", "--tabbed", dest="tabbed", action="store_true",
default=False, help="Option for small screens: Combine "
"the data source panel and the coregistration panel "
"into a single panel with tabs.")
parser.add_option("--no-guess-mri", dest="guess_mri_subject",
action='store_false', default=None,
help="Prevent the GUI from automatically guessing and "
"changing the MRI subject when a new head shape source "
"file is selected.")
parser.add_option("--head-opacity", type=float, default=None,
dest="head_opacity",
help="The opacity of the head surface, in the range "
"[0, 1].")
parser.add_option("--high-res-head",
action='store_true', default=False, dest="high_res_head",
help="Use a high-resolution head surface.")
parser.add_option("--low-res-head",
action='store_true', default=False, dest="low_res_head",
help="Use a low-resolution head surface.")
parser.add_option('--trans', dest='trans', default=None,
help='Head<->MRI transform FIF file ("-trans.fif")')
parser.add_option('--project-eeg', dest='project_eeg',
action='store_true', default=None,
help="Project EEG electrodes to the head surface ("
"for visualization purposes only)")
parser.add_option('--orient-to-surface',
action='store_true', default=None,
dest='orient_to_surface',
help='Orient points to the surface.')
parser.add_option('--scale-by-distance',
action='store_true', default=None,
dest='scale_by_distance',
help='Scale points by distance from the surface.')
parser.add_option('--mark-inside',
action='store_true', default=None,
dest='mark_inside',
help='Mark points inside the head using a different '
'color.')
parser.add_option('--interaction',
type=str, default=None, dest='interaction',
help='Interaction style to use, can be "trackball" or '
'"terrain".')
parser.add_option('--scale',
type=float, default=None, dest='scale',
help='Scale factor for the scene.')
parser.add_option('--verbose', action='store_true', dest='verbose',
help='Turn on verbose mode.')
parser.add_option('--simple-rendering', action='store_false',
dest='advanced_rendering',
help='Use simplified OpenGL rendering')
options, args = parser.parse_args()
if options.low_res_head:
if options.high_res_head:
raise ValueError("Can't specify --high-res-head and "
"--low-res-head at the same time.")
head_high_res = False
elif options.high_res_head:
head_high_res = True
else:
head_high_res = None
# expanduser allows ~ for --subjects-dir
subjects_dir = options.subjects_dir
if subjects_dir is not None:
subjects_dir = op.expanduser(subjects_dir)
trans = options.trans
if trans is not None:
trans = op.expanduser(trans)
try:
import faulthandler
faulthandler.enable()
except ImportError:
pass # old Python2
with ETSContext():
mne.gui.coregistration(
options.tabbed, inst=options.inst, subject=options.subject,
subjects_dir=subjects_dir,
guess_mri_subject=options.guess_mri_subject,
head_opacity=options.head_opacity, head_high_res=head_high_res,
trans=trans, scrollable=True, project_eeg=options.project_eeg,
orient_to_surface=options.orient_to_surface,
scale_by_distance=options.scale_by_distance,
mark_inside=options.mark_inside, interaction=options.interaction,
scale=options.scale,
advanced_rendering=options.advanced_rendering,
verbose=options.verbose)
if is_main:
sys.exit(0)
is_main = (__name__ == '__main__')
if is_main:
run()
| 40.929688 | 79 | 0.578927 |
1792e2d0830dc734d5adbbbac757591146e3f6b2 | 470 | py | Python | infrastructure/user/password.py | atlednolispe/crawler_tools | 6cf9e03986ea72b67b54facb70f3c2c80b9b1f0b | ["MIT"] | null | null | null | infrastructure/user/password.py | atlednolispe/crawler_tools | 6cf9e03986ea72b67b54facb70f3c2c80b9b1f0b | ["MIT"] | null | null | null | infrastructure/user/password.py | atlednolispe/crawler_tools | 6cf9e03986ea72b67b54facb70f3c2c80b9b1f0b | ["MIT"] | null | null | null |
import random
def generate_password():
    lowers = [chr(i) for i in range(97, 123)]
    uppers = [chr(i) for i in range(65, 91)]
    numbers = [chr(i) for i in range(48, 58)]
    symbols = [i for i in '!#$%^*()']
    parts = [lowers, uppers, symbols, numbers]
    pw = [''.join(random.choices(part, k=2)) for part in parts]
    password = ''.join(pw)
    password = ''.join(random.sample(password, k=len(password)))
    return {
        'password': password,
    }
| 26.111111 | 64 | 0.580851 |
ac930a7a471da4c0770e7df1af4b68406406b8d7 | 2,402 | py | Python | iglu_client/core.py | jhosteny/iglu-python-client | b00548f55eb99c146241be4f724f3da988cd243d | ["Apache-2.0"] | null | null | null | iglu_client/core.py | jhosteny/iglu-python-client | b00548f55eb99c146241be4f724f3da988cd243d | ["Apache-2.0"] | null | null | null | iglu_client/core.py | jhosteny/iglu-python-client | b00548f55eb99c146241be4f724f3da988cd243d | ["Apache-2.0"] | null | null | null |
import re
import jsonschema
# Regular expression to extract metadata from self-describing JSON
URI_REGEX = "^iglu:([a-zA-Z0-9\\-_.]+)/([a-zA-Z0-9\\-_]+)/([a-zA-Z0-9\\-_]+)/([1-9][0-9]*(?:-(?:0|[1-9][0-9]*)){2})$"
# Regular expression to extract all parts of SchemaVer: MODEL, REVISION,
# ADDITION
SCHEMAVER_REGEX = "^([1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)$"
# Let jsonschema know about the self-describing meta-schema
jsonschema.validators.meta_schemas[
"http://iglucentral.com/schemas/com.snowplowanalytics.self-desc/schema/jsonschema/1-0-0"
] = jsonschema.validators.Draft4Validator
class SchemaVer(object):
def __init__(self, model: int, revision: int, addition: int):
self.model = model
self.revision = revision
self.addition = addition
def as_string(self) -> str:
return "%d-%d-%d" % (self.model, self.revision, self.addition)
@staticmethod
def parse_schemaver(version: str):
m = re.match(SCHEMAVER_REGEX, version)
if not m:
raise IgluError(
"Schema version {version} is not a valid Iglu SchemaVer".format(
version=version
)
)
else:
model, revision, addition = m.groups()
return SchemaVer(int(model), int(revision), int(addition))
class SchemaKey(object):
def __init__(self, vendor: str, name: str, format: str, version: SchemaVer):
self.vendor = vendor
self.name = name
self.format = format
self.version = version
def as_uri(self) -> str:
return "iglu:{path}".format(path=self.as_path())
def as_path(self) -> str:
return "{vendor}/{name}/{format}/{version}".format(
vendor=self.vendor,
name=self.name,
format=self.format,
version=self.version.as_string(),
)
# Construct SchemaKey from URI
@staticmethod
def parse_key(key):
m = re.match(URI_REGEX, key)
if not m:
raise IgluError("Schema key [{key}] is not valid Iglu URI".format(key=key))
else:
vendor, name, format, version = m.groups()
schema_ver = SchemaVer.parse_schemaver(version)
return SchemaKey(vendor, name, format, schema_ver)
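# Illustrative usage sketch (not part of the original module; the URI below is
# only an example of the iglu:vendor/name/format/version layout):
#
#   key = SchemaKey.parse_key(
#       "iglu:com.snowplowanalytics.snowplow/ad_impression/jsonschema/1-0-0")
#   assert key.as_uri() == "iglu:com.snowplowanalytics.snowplow/ad_impression/jsonschema/1-0-0"
#   assert key.version.model == 1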
# Common Iglu error
class IgluError(Exception):
def __init__(self, message):
self.message = message
| 32.026667 | 117 | 0.606578 |
954f2a7b44de714ed6e74b56fe96480e12b3cb89 | 15,911 | py | Python | example/finetune_ner.py | kunde122/ERNIE1 | 050327e968b2d7d9090ab882a5dd6b0fdeca80b4 | ["Apache-2.0"] | 1 | 2020-10-19T09:41:11.000Z | 2020-10-19T09:41:11.000Z | example/finetune_ner.py | kunde122/ERNIE1 | 050327e968b2d7d9090ab882a5dd6b0fdeca80b4 | ["Apache-2.0"] | null | null | null | example/finetune_ner.py | kunde122/ERNIE1 | 050327e968b2d7d9090ab882a5dd6b0fdeca80b4 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import time
from random import random
from functools import reduce, partial
import numpy as np
import multiprocessing
import logging
import six
import re
import paddle
import paddle.fluid as F
import paddle.fluid.layers as L
from model.ernie import ErnieModel
from optimization import optimization
import utils.data
from propeller import log
log.setLevel(logging.DEBUG)
import propeller.paddle as propeller
class SequenceLabelErnieModel(propeller.train.Model):
"""propeller Model wraper for paddle-ERNIE """
def __init__(self, hparam, mode, run_config):
self.hparam = hparam
self.mode = mode
self.run_config = run_config
self.num_label = len(hparam['label_list'])
def forward(self, features):
src_ids, sent_ids, input_seqlen = features
zero = L.fill_constant([1], dtype='int64', value=0)
input_mask = L.cast(L.equal(src_ids, zero), 'float32') # assume pad id == 0
#input_mask = L.unsqueeze(input_mask, axes=[2])
d_shape = L.shape(src_ids)
seqlen = d_shape[1]
batch_size = d_shape[0]
pos_ids = L.unsqueeze(L.range(0, seqlen, 1, dtype='int32'), axes=[0])
pos_ids = L.expand(pos_ids, [batch_size, 1])
pos_ids = L.unsqueeze(pos_ids, axes=[2])
pos_ids = L.cast(pos_ids, 'int64')
pos_ids.stop_gradient = True
input_mask.stop_gradient = True
task_ids = L.zeros_like(src_ids) + self.hparam.task_id # task_ids are not used by the model at the moment
task_ids.stop_gradient = True
model = ErnieModel(
src_ids=src_ids,
position_ids=pos_ids,
sentence_ids=sent_ids,
task_ids=task_ids,
input_mask=input_mask,
config=self.hparam,
use_fp16=self.hparam['use_fp16']
)
enc_out = model.get_sequence_output()
logits = L.fc(
input=enc_out,
size=self.num_label,
num_flatten_dims=2,
param_attr= F.ParamAttr(
name="cls_seq_label_out_w",
initializer= F.initializer.TruncatedNormal(scale=0.02)),
bias_attr=F.ParamAttr(
name="cls_seq_label_out_b",
initializer=F.initializer.Constant(0.)))
propeller.summary.histogram('pred', logits)
return logits, input_seqlen
def loss(self, predictions, labels):
logits, input_seqlen = predictions
logits = L.flatten(logits, axis=2)
labels = L.flatten(labels, axis=2)
ce_loss, probs = L.softmax_with_cross_entropy(
logits=logits, label=labels, return_softmax=True)
loss = L.mean(x=ce_loss)
return loss
def backward(self, loss):
scheduled_lr, _ = optimization(
loss=loss,
warmup_steps=int(self.run_config.max_steps * self.hparam['warmup_proportion']),
num_train_steps=self.run_config.max_steps,
learning_rate=self.hparam['learning_rate'],
train_program=F.default_main_program(),
startup_prog=F.default_startup_program(),
weight_decay=self.hparam['weight_decay'],
scheduler="linear_warmup_decay",)
propeller.summary.scalar('lr', scheduled_lr)
def metrics(self, predictions, label):
pred, seqlen = predictions
pred = L.argmax(pred, axis=-1)
pred = L.unsqueeze(pred, axes=[-1])
f1 = propeller.metrics.ChunkF1(label, pred, seqlen, self.num_label)
return {'f1': f1}
def make_sequence_label_dataset(name, input_files, label_list, tokenizer, batch_size, max_seqlen, is_train):
label_map = {v: i for i, v in enumerate(label_list)}
no_entity_id = label_map['O']
delimiter = b''
def read_bio_data(filename):
ds = propeller.data.Dataset.from_file(filename)
iterable = iter(ds)
def gen():
buf, size = [], 0
iterator = iter(ds)
while 1:
line = next(iterator)
cols = line.rstrip(b'\n').split(b'\t')
tokens = cols[0].split(delimiter)
labels = cols[1].split(delimiter)
if len(cols) != 2:
continue
if len(tokens) != len(labels) or len(tokens) == 0:
continue
yield [tokens, labels]
return propeller.data.Dataset.from_generator_func(gen)
def reseg_token_label(dataset):
def gen():
iterator = iter(dataset)
while True:
tokens, labels = next(iterator)
assert len(tokens) == len(labels)
ret_tokens = []
ret_labels = []
for token, label in zip(tokens, labels):
sub_token = tokenizer(token)
label = label.decode('utf8')
if len(sub_token) == 0:
continue
ret_tokens.extend(sub_token)
ret_labels.append(label)
if len(sub_token) < 2:
continue
sub_label = label
if label.startswith("B-"):
sub_label = "I-" + label[2:]
ret_labels.extend([sub_label] * (len(sub_token) - 1))
assert len(ret_tokens) == len(ret_labels)
yield ret_tokens, ret_labels
ds = propeller.data.Dataset.from_generator_func(gen)
return ds
def convert_to_ids(dataset):
def gen():
iterator = iter(dataset)
while True:
tokens, labels = next(iterator)
if len(tokens) > max_seqlen - 2:
tokens = tokens[: max_seqlen - 2]
labels = labels[: max_seqlen - 2]
tokens = ['[CLS]'] + tokens + ['[SEP]']
token_ids = [vocab[t] for t in tokens]
label_ids = [no_entity_id] + [label_map[x] for x in labels] + [no_entity_id]
token_type_ids = [0] * len(token_ids)
input_seqlen = len(token_ids)
token_ids = np.array(token_ids, dtype=np.int64)
label_ids = np.array(label_ids, dtype=np.int64)
token_type_ids = np.array(token_type_ids, dtype=np.int64)
input_seqlen = np.array(input_seqlen, dtype=np.int64)
yield token_ids, token_type_ids, input_seqlen, label_ids
ds = propeller.data.Dataset.from_generator_func(gen)
return ds
def after(*features):
return utils.data.expand_dims(*features)
dataset = propeller.data.Dataset.from_list(input_files)
if is_train:
dataset = dataset.repeat().shuffle(buffer_size=len(input_files))
dataset = dataset.interleave(map_fn=read_bio_data, cycle_length=len(input_files), block_length=1)
if is_train:
dataset = dataset.shuffle(buffer_size=100)
dataset = reseg_token_label(dataset)
dataset = convert_to_ids(dataset)
dataset = dataset.padded_batch(batch_size).map(after)
dataset.name = name
return dataset
def make_sequence_label_dataset_from_stdin(name, tokenizer, batch_size, max_seqlen):
delimiter = b''
def stdin_gen():
if six.PY3:
source = sys.stdin.buffer
else:
source = sys.stdin
while True:
line = source.readline()
if len(line) == 0:
break
yield line,
def read_bio_data(ds):
iterable = iter(ds)
def gen():
buf, size = [], 0
iterator = iter(ds)
while 1:
line, = next(iterator)
cols = line.rstrip(b'\n').split(b'\t')
tokens = cols[0].split(delimiter)
if len(cols) != 1:
continue
if len(tokens) == 0:
continue
yield tokens,
return propeller.data.Dataset.from_generator_func(gen)
def reseg_token_label(dataset):
def gen():
iterator = iter(dataset)
while True:
tokens, = next(iterator)
ret_tokens = []
for token in tokens:
sub_token = tokenizer(token)
if len(sub_token) == 0:
continue
ret_tokens.extend(sub_token)
if len(sub_token) < 2:
continue
yield ret_tokens,
ds = propeller.data.Dataset.from_generator_func(gen)
return ds
def convert_to_ids(dataset):
def gen():
iterator = iter(dataset)
while True:
tokens, = next(iterator)
if len(tokens) > max_seqlen - 2:
tokens = tokens[: max_seqlen - 2]
tokens = ['[CLS]'] + tokens + ['[SEP]']
token_ids = [vocab[t] for t in tokens]
token_type_ids = [0] * len(token_ids)
input_seqlen = len(token_ids)
token_ids = np.array(token_ids, dtype=np.int64)
token_type_ids = np.array(token_type_ids, dtype=np.int64)
input_seqlen = np.array(input_seqlen, dtype=np.int64)
yield token_ids, token_type_ids, input_seqlen
ds = propeller.data.Dataset.from_generator_func(gen)
return ds
def after(*features):
return utils.data.expand_dims(*features)
dataset = propeller.data.Dataset.from_generator_func(stdin_gen)
dataset = read_bio_data(dataset)
dataset = reseg_token_label(dataset)
dataset = convert_to_ids(dataset)
dataset = dataset.padded_batch(batch_size).map(after)
dataset.name = name
return dataset
if __name__ == '__main__':
parser = propeller.ArgumentParser('NER model with ERNIE')
parser.add_argument('--max_seqlen', type=int, default=128)
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--vocab_file', type=str, required=True)
parser.add_argument('--do_predict', action='store_true')
parser.add_argument('--use_sentence_piece_vocab', action='store_true')
parser.add_argument('--warm_start_from', type=str)
args = parser.parse_args()
run_config = propeller.parse_runconfig(args)
hparams = propeller.parse_hparam(args)
vocab = {j.strip().split('\t')[0]: i for i, j in enumerate(open(args.vocab_file, 'r', encoding='utf8'))}
tokenizer = utils.data.CharTokenizer(vocab, sentencepiece_style_vocab=args.use_sentence_piece_vocab)
sep_id = vocab['[SEP]']
cls_id = vocab['[CLS]']
unk_id = vocab['[UNK]']
pad_id = vocab['[PAD]']
label_list = ['B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'O']
hparams['label_list'] = label_list
if not args.do_predict:
train_data_dir = os.path.join(args.data_dir, 'train')
train_input_files = [os.path.join(train_data_dir, filename) for filename in os.listdir(train_data_dir)]
dev_data_dir = os.path.join(args.data_dir, 'dev')
dev_input_files = [os.path.join(dev_data_dir, filename) for filename in os.listdir(dev_data_dir)]
test_data_dir = os.path.join(args.data_dir, 'test')
test_input_files = [os.path.join(test_data_dir, filename) for filename in os.listdir(test_data_dir)]
train_ds = make_sequence_label_dataset(name='train',
input_files=train_input_files,
label_list=label_list,
tokenizer=tokenizer,
batch_size=hparams.batch_size,
max_seqlen=args.max_seqlen,
is_train=True)
dev_ds = make_sequence_label_dataset(name='dev',
input_files=dev_input_files,
label_list=label_list,
tokenizer=tokenizer,
batch_size=hparams.batch_size,
max_seqlen=args.max_seqlen,
is_train=False)
test_ds = make_sequence_label_dataset(name='test',
input_files=test_input_files,
label_list=label_list,
tokenizer=tokenizer,
batch_size=hparams.batch_size,
max_seqlen=args.max_seqlen,
is_train=False)
shapes = ([-1, args.max_seqlen, 1], [-1, args.max_seqlen, 1], [-1, 1], [-1, args.max_seqlen, 1])
types = ('int64', 'int64', 'int64', 'int64')
train_ds.data_shapes = shapes
train_ds.data_types = types
dev_ds.data_shapes = shapes
dev_ds.data_types = types
test_ds.data_shapes = shapes
test_ds.data_types = types
varname_to_warmstart = re.compile(r'^encoder.*[wb]_0$|^.*embedding$|^.*bias$|^.*scale$|^pooled_fc.[wb]_0$')
warm_start_dir = args.warm_start_from
ws = propeller.WarmStartSetting(
predicate_fn=lambda v: varname_to_warmstart.match(v.name) and os.path.exists(os.path.join(warm_start_dir, v.name)),
from_dir=warm_start_dir
)
best_exporter = propeller.train.exporter.BestInferenceModelExporter(os.path.join(run_config.model_dir, 'best'), cmp_fn=lambda old, new: new['dev']['f1'] > old['dev']['f1'])
propeller.train.train_and_eval(
model_class_or_model_fn=SequenceLabelErnieModel,
params=hparams,
run_config=run_config,
train_dataset=train_ds,
eval_dataset={'dev': dev_ds, 'test': test_ds},
warm_start_setting=ws,
exporters=[best_exporter])
for k in best_exporter._best['dev'].keys():
if 'loss' in k:
continue
dev_v = best_exporter._best['dev'][k]
test_v = best_exporter._best['test'][k]
print('dev_%s\t%.5f\ntest_%s\t%.5f' % (k, dev_v, k, test_v))
else:
predict_ds = make_sequence_label_dataset_from_stdin(name='pred',
tokenizer=tokenizer,
batch_size=hparams.batch_size,
max_seqlen=args.max_seqlen)
shapes = ([-1, args.max_seqlen, 1], [-1, args.max_seqlen, 1], [-1, 1])
types = ('int64', 'int64', 'int64')
predict_ds.data_shapes = shapes
predict_ds.data_types = types
rev_label_map = {i: v for i, v in enumerate(label_list)}
learner = propeller.Learner(SequenceLabelErnieModel, run_config, hparams)
for pred, _ in learner.predict(predict_ds, ckpt=-1):
pred_str = ' '.join([rev_label_map[idx] for idx in np.argmax(pred, 1).tolist()])
print(pred_str)
| 39.977387 | 180 | 0.56904 |
4df8d4bf0cf19b3f54429dcc4a22aeaef4124811 | 5,146 | py | Python | ppml/kms-client/KMS_Client.py | Laniakea94/BigDL | 4d01734086dda893a7f08ba53251dc3c5c8ecfd1 | ["Apache-2.0"] | null | null | null | ppml/kms-client/KMS_Client.py | Laniakea94/BigDL | 4d01734086dda893a7f08ba53251dc3c5c8ecfd1 | ["Apache-2.0"] | null | null | null | ppml/kms-client/KMS_Client.py | Laniakea94/BigDL | 4d01734086dda893a7f08ba53251dc3c5c8ecfd1 | ["Apache-2.0"] | null | null | null |
import argparse
import FileOperator, KeyManager
def generate_primary_key(ip, port):
KeyManager.generate_primary_key_ciphertext(ip, port)
def generate_data_key(ip, port, encrypted_primary_key_path):
KeyManager.generate_data_key_ciphertext(ip, port, encrypted_primary_key_path)
def encrypt_file_without_key(data_file_path, ip, port):
KeyManager.generate_primary_key_ciphertext(ip, port)
KeyManager.generate_data_key_ciphertext(ip, port, './encrypted_primary_key')
FileOperator.encrypt_data_file(ip, port, data_file_path, './encrypted_primary_key', './encrypted_data_key')
def encrypt_file_with_key(data_file_path, ip, port, encrypted_primary_key_path, encrypted_data_key_path):
FileOperator.encrypt_data_file(ip, port, data_file_path, encrypted_primary_key_path, encrypted_data_key_path)
def decrypt_file(data_file_path, ip, port, encrypted_primary_key_path, encrypted_data_key_path):
FileOperator.decrypt_data_file(ip, port, data_file_path, encrypted_primary_key_path, encrypted_data_key_path)
def encrypt_directory_with_key(dir_path, ip, port,encrypted_primary_key_path, encrypted_data_key_path, save_dir=None):
FileOperator.encrypt_directory_automation(ip, port, dir_path, encrypted_primary_key_path, encrypted_data_key_path,save_dir)
def encrypt_directory_without_key(dir_path, ip, port, save_dir=None):
KeyManager.generate_primary_key_ciphertext(ip, port)
KeyManager.generate_data_key_ciphertext(ip, port, './encrypted_primary_key')
FileOperator.encrypt_directory_automation(ip, port, dir_path, './encrypted_primary_key', './encrypted_data_key',save_dir)
def get_data_key_plaintext(ip, port, encrypted_primary_key_path, encrypted_data_key_path):
data_key_plaintext = KeyManager.retrieve_data_key_plaintext(ip, port, encrypted_primary_key_path, encrypted_data_key_path)
print(data_key_plaintext)
return data_key_plaintext
def decrypt_csv_columns(ip, port, encrypted_primary_key_path, encrypted_data_key_path, input_dir):
FileOperator.decrypt_csv_columns_automation(ip, port, encrypted_primary_key_path, encrypted_data_key_path, input_dir)
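# Illustrative command lines (not part of the original script; the IP, port
# and paths are placeholders):
#
#   python KMS_Client.py -api generate_primary_key -ip 192.168.0.112 -port 3000
#   python KMS_Client.py -api encrypt_file_with_key -ip 192.168.0.112 \
#       -pkp ./encrypted_primary_key -dkp ./encrypted_data_key -dfp ./data.csv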
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-api', '--api', type=str, help='name of the API to use', required=True)
parser.add_argument('-ip', '--ip', type=str, help='ip address of the ehsm_kms_server', required=True)
parser.add_argument('-port', '--port', type=str, help='port of the ehsm_kms_server',default='3000', required=False)
parser.add_argument('-pkp', '--pkp', type=str, help='path of the primary key storage file', required=False)
parser.add_argument('-dkp', '--dkp', type=str, help='path of the data key storage file', required=False)
parser.add_argument('-dfp', '--dfp', type=str, help='path of the data file to encrypt', required=False)
parser.add_argument('-dir', '--dir', type=str, help='path of the directory containing column-encrypted CSVs or the directory to be encrypted', required=False)
parser.add_argument('-sdp', '--sdp', type=str, help='path of the save directory output to',required=False)
args = parser.parse_args()
api = args.api
ip = args.ip
port = args.port
if api == 'encrypt_file_without_key':
data_file_path = args.dfp
encrypt_file_without_key(data_file_path, ip, port)
elif api == 'generate_primary_key':
generate_primary_key(ip, port)
elif api == 'generate_data_key':
encrypted_primary_key_path = args.pkp
generate_data_key(ip, port, encrypted_primary_key_path)
elif api == 'encrypt_file_with_key':
data_file_path = args.dfp
encrypted_primary_key_path = args.pkp
encrypted_data_key_path = args.dkp
encrypt_file_with_key(data_file_path, ip, port, encrypted_primary_key_path, encrypted_data_key_path)
elif api == 'decrypt_file':
data_file_path = args.dfp
encrypted_primary_key_path = args.pkp
encrypted_data_key_path = args.dkp
decrypt_file(data_file_path, ip, port, encrypted_primary_key_path, encrypted_data_key_path)
elif api == 'encrypt_directory_without_key':
dir_path = args.dir
save_path = args.sdp
encrypt_directory_without_key(dir_path, ip, port, save_path)
elif api == 'encrypt_directory_with_key':
dir_path = args.dir
encrypted_primary_key_path = args.pkp
encrypted_data_key_path = args.dkp
save_path = args.sdp
encrypt_directory_with_key(dir_path, ip, port, encrypted_primary_key_path, encrypted_data_key_path,save_path)
elif api == 'get_data_key_plaintext':
encrypted_primary_key_path = args.pkp
encrypted_data_key_path = args.dkp
get_data_key_plaintext(ip, port, encrypted_primary_key_path, encrypted_data_key_path)
elif api == 'decrypt_csv_columns':
encrypted_primary_key_path = args.pkp
encrypted_data_key_path = args.dkp
input_dir = args.dir
decrypt_csv_columns(ip, port, encrypted_primary_key_path, encrypted_data_key_path, input_dir)
| 51.979798 | 163 | 0.74796 |
6c90b9f80fb4ad61a7c414f65cd177fdc1d527da | 852 | py | Python | qiskit/grover_algo.py | Lucaman99/Quantum-Computing | 6b63911297829d7f4d5286bcf6fe93b2bf2fb231 | ["MIT"] | 5 | 2019-11-12T19:40:23.000Z | 2021-06-21T07:17:29.000Z | qiskit/grover_algo.py | Lucaman99/Quantum-Computing | 6b63911297829d7f4d5286bcf6fe93b2bf2fb231 | ["MIT"] | null | null | null | qiskit/grover_algo.py | Lucaman99/Quantum-Computing | 6b63911297829d7f4d5286bcf6fe93b2bf2fb231 | ["MIT"] | 3 | 2021-04-11T06:22:39.000Z | 2022-01-02T12:39:02.000Z |
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute, Aer
#Grover's Algo to find 2 from 4 possible states
n = 4
q = QuantumRegister(n)
c = ClassicalRegister(n)
circuit = QuantumCircuit(q, c)
#Initialize gates
circuit.x(q[2])
circuit.h(q[0])
circuit.h(q[1])
circuit.h(q[2])
#Apply the oracle
circuit.ccx(q[0], q[1], q[2])
#Apply the Hadamard gates
circuit.h(q[0])
circuit.h(q[1])
#Apply the phase shift
#circuit.cx(q[0], q[2])
#circuit.cx(q[1], q[2])
circuit.x(q[0])
circuit.x(q[1])
circuit.ccx(q[0], q[1], q[2])
circuit.x(q[0])
circuit.x(q[1])
#Apply the second round of Hadamard gates
circuit.h(q[0])
circuit.h(q[1])
circuit.h(q[2])
circuit.measure(q, c)
print(circuit)
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(circuit, backend)
sim_result = job_sim.result()
print(sim_result.get_counts(circuit))
| 20.285714 | 83 | 0.711268 |
a1f078db138b6653ab2db8c13d6629278dec1ecf | 37,976 | py | Python | models/networks.py | zhuxiaofan117/EECS598-Deep-Learning-Final-Project | f0fd47f581d68cf0e7992f3054b81011271b3822 | ["BSD-3-Clause"] | null | null | null | models/networks.py | zhuxiaofan117/EECS598-Deep-Learning-Final-Project | f0fd47f581d68cf0e7992f3054b81011271b3822 | ["BSD-3-Clause"] | null | null | null | models/networks.py | zhuxiaofan117/EECS598-Deep-Learning-Final-Project | f0fd47f581d68cf0e7992f3054b81011271b3822 | ["BSD-3-Clause"] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.niter> epochs
and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
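# Illustrative usage sketch (not part of the original file; the option values
# and the `net`/`argparse.Namespace` objects below are stand-ins for a real
# BaseOptions instance and model):
#
#   opt = argparse.Namespace(lr_policy='linear', epoch_count=1,
#                            niter=100, niter_decay=100)
#   optimizer = torch.optim.Adam(net.parameters(), lr=0.0002, betas=(0.5, 0.999))
#   scheduler = get_scheduler(optimizer, opt)
#   for epoch in range(opt.niter + opt.niter_decay):
#       ...  # train one epoch
#       scheduler.step()  # update the learning rate once per epoch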
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_Ske2Ink':
net = Ske2Ink_generator(input_nc, output_nc)
elif netG == 'unet_Ske2Ink_random':
net = Ske2InkRandom_generator(input_nc, output_nc)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
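# Illustrative usage sketch (not part of the original file; the sizes are
# example values, not recommendations):
#
#   netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
#                   norm='instance', use_dropout=False, gpu_ids=[])
#   fake = netG(torch.randn(1, 3, 256, 256))  # one RGB image in, one image out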
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == 'new_basic':
net = NLayerDiscriminator_new(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
def define_E(input_nc, ndf, init_type='normal', init_gain=0.02, gpu_ids=[]):
netE = Encoder(input_nc, ndf)
return init_net(netE, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
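# Illustrative usage sketch (assumes torch is imported at the top of this module,
# as the classes above require; `pred_fake` stands in for a discriminator output):
#
#     criterion = GANLoss('lsgan')
#     pred_fake = torch.randn(4, 1, 30, 30)
#     loss_G = criterion(pred_fake, True)                 # generator: fakes should score as real
#     loss_D_fake = criterion(pred_fake.detach(), False)  # discriminator: fakes should score as fake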
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
alpha = alpha.to(device)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the gradients per sample
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
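# Illustrative usage sketch for the WGAN-GP objective (`netD`, `real`, `fake` and
# `device` are placeholders created elsewhere in a training loop); the first two
# terms mirror the 'wgangp' branch of GANLoss above:
#
#     gp, _ = cal_gradient_penalty(netD, real, fake, device, type='mixed', constant=1.0, lambda_gp=10.0)
#     loss_D = -netD(real).mean() + netD(fake).mean() + gp
#     loss_D.backward()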
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
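# Illustrative usage sketch (assumes torch is imported at the top of this module):
# the ResNet generator is fully convolutional, so the spatial size is preserved.
#
#     G = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=9)
#     y = G(torch.randn(1, 3, 256, 256))   # -> torch.Size([1, 3, 256, 256])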
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                                an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
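# Illustrative usage sketch (assumes torch is imported at the top of this module):
# for a 1x1 bottleneck, 2 ** num_downs should equal the image size, e.g. num_downs=8
# for 256x256 inputs.
#
#     G = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64)
#     y = G(torch.randn(1, 3, 256, 256))   # -> torch.Size([1, 3, 256, 256])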
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
            use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator_new(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator_new, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
self.fc = nn.Linear(900, 1) # hardcoded input size
def forward(self, input):
"""Standard forward."""
        output1 = self.model(input)
        t = output1.view(output1.size(0), -1)  # flatten the patch map for the linear head
        output2 = self.fc(t)
return (output1, output2.view(-1))
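# Illustrative shape sketch: the hard-coded nn.Linear(900, 1) head assumes the patch
# map flattens to 900 values per sample, i.e. a 1x30x30 output, which is what
# n_layers=3 produces for 256x256 inputs.
#
#     D = NLayerDiscriminator_new(input_nc=3, ndf=64, n_layers=3)
#     patch_map, score = D(torch.randn(2, 3, 256, 256))
#     # patch_map: torch.Size([2, 1, 30, 30]), score: torch.Size([2])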
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
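# Illustrative usage sketch: the PatchGAN is fully convolutional, so it accepts
# arbitrarily-sized inputs and returns one logit per overlapping patch rather
# than a single scalar per image.
#
#     D = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#     out = D(torch.randn(2, 3, 256, 256))   # -> torch.Size([2, 1, 30, 30])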
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.InstanceNorm2d
else:
use_bias = norm_layer != nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
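# Illustrative usage sketch: the 1x1 PixelGAN keeps the input's spatial size and
# scores every pixel independently.
#
#     D = PixelDiscriminator(input_nc=3, ndf=64)
#     out = D(torch.randn(2, 3, 256, 256))   # -> torch.Size([2, 1, 256, 256])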
class Ske2Ink_encoder(nn.Module):
def __init__(self, input_nc, ngf=64):
super(Ske2Ink_encoder, self).__init__()
self.generator_dim = ngf
self.layer_result = dict()
self.encoder = nn.Sequential(
nn.Conv2d(input_nc, self.generator_dim, kernel_size = 4, stride = 2, padding = 1),
self.encoder_layer(self.generator_dim, self.generator_dim * 2),
self.encoder_layer(self.generator_dim * 2, self.generator_dim * 4),
self.encoder_layer(self.generator_dim * 4, self.generator_dim * 8),
self.encoder_layer(self.generator_dim * 8, self.generator_dim * 8),
self.encoder_layer(self.generator_dim * 8, self.generator_dim * 8),
self.encoder_layer(self.generator_dim * 8, self.generator_dim * 8),
self.last_layer(self.generator_dim * 8, self.generator_dim * 8),
)
def last_layer(self, input_nc, output_nc):
encoder_layer = nn.Sequential(
nn.ReLU(),
nn.Conv2d(input_nc, output_nc, kernel_size = 4, stride = 2, padding = 1),
)
return encoder_layer
def encoder_layer(self, input_nc, output_nc):
encoder_layer = nn.Sequential(
nn.ReLU(),
nn.Conv2d(input_nc, output_nc, kernel_size = 4, stride = 2, padding = 1),
nn.BatchNorm2d(output_nc)
)
return encoder_layer
def forward(self, image):
enc = image
for i, layer in enumerate(self.encoder):
enc = layer(enc)
self.layer_result["e%d" % (i+1)] = enc
return enc, self.layer_result
class Ske2Ink_decoder(nn.Module):
def __init__(self, output_nc, ngf=64):
super(Ske2Ink_decoder, self).__init__()
s = 256
self.generator_dim = ngf
s2, s4, s8, s16, s32, s64, s128 = int(s / 2), int(s / 4), int(s / 8), int(s / 16), int(s / 32), int(s / 64), int(s / 128)
        self.linear_layer = nn.Linear(640, 512)  # hard-coded: maps the 640-dim concatenated encoder/style vector down to 512
self.decoder = nn.Sequential(
self.decoder_layer(s128, self.generator_dim * 8, self.generator_dim * 8),
self.decoder_layer(s64, self.generator_dim * 16, self.generator_dim * 8),
self.decoder_layer(s32, self.generator_dim * 16, self.generator_dim * 8),
self.decoder_layer(s16, self.generator_dim * 16, self.generator_dim * 8),
self.decoder_layer(s8, self.generator_dim * 16, self.generator_dim * 4),
self.decoder_layer(s4, self.generator_dim * 8, self.generator_dim * 2),
self.decoder_layer(s2, self.generator_dim * 4, self.generator_dim),
self.decoder_layer(s, self.generator_dim * 2, output_nc),
)
def decoder_layer(self, output_width, input_nc, output_nc, dropout=False):
decoder_layer = nn.Sequential(
nn.ReLU(),
nn.ConvTranspose2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(output_nc)
)
if dropout:
decoder_layer = nn.Sequential(
decoder_layer,
nn.Dropout2d(0.5)
)
return decoder_layer
def forward(self, encoded_vector, enc_layer_results):
        dec = self.linear_layer(encoded_vector)
dec = dec[:, :, None, None]
i = 7
for layer in self.decoder:
dec = layer(dec)
if i != 0:
dec = torch.cat((dec, enc_layer_results["e%d" % i]),1)
i = i-1
return torch.tanh(dec)
class Ske2Ink_generator(nn.Module):
def __init__(self, input_nc=3, output_nc=3):
super(Ske2Ink_generator, self).__init__()
self.encoder = Ske2Ink_encoder(input_nc, ngf=64)
self.decoder = Ske2Ink_decoder(output_nc, ngf=64)
self.embedding_layer = init_embedding(2, 128)
        self.embedding_layer.weight.requires_grad = False  # freeze the style embedding
def forward(self, images, embedding_ids):
e8, enc_layers = self.encoder.forward(images)
local_embeddings = self.embedding_layer(embedding_ids)
local_embeddings = local_embeddings[:, :, None, None] # HardCoding 2 new dimensions instead of view
embedded = torch.cat((e8, local_embeddings), 1)
embedded = embedded.view(embedded.size(0),-1)
output = self.decoder.forward(embedded, enc_layers)
return output
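# Illustrative usage sketch (assumes torch is imported at the top of this module):
# the encoder halves the resolution eight times down to 1x1, so inputs are expected
# to be 256x256, and embedding ids must be in {0, 1} to match init_embedding(2, 128).
#
#     G = Ske2Ink_generator(input_nc=3, output_nc=3)
#     images = torch.randn(2, 3, 256, 256)
#     ids = torch.tensor([0, 1])             # dtype long, one style id per sample
#     out = G(images, ids)                   # -> torch.Size([2, 3, 256, 256])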
def init_embedding(embedding_num, embedding_dim):
weight = torch.randn(embedding_num, embedding_dim)
embeddings = nn.Embedding.from_pretrained(weight)
return embeddings
class Ske2InkRandom_generator(nn.Module):
def __init__(self, input_nc=3, output_nc=3):
super(Ske2InkRandom_generator, self).__init__()
self.encoder = Ske2Ink_encoder(input_nc, ngf=64)
self.decoder = Ske2Ink_decoder(output_nc, ngf=64)
self.embedding_layer = init_embedding(2, 64)
        self.embedding_layer.weight.requires_grad = False  # freeze the style embedding
def forward(self, images, embedding_ids, z):
e8, enc_layers = self.encoder.forward(images)
local_embeddings = self.embedding_layer(embedding_ids)
local_embeddings = local_embeddings[:, :, None, None] # HardCoding 2 new dimensions instead of view
embedded = torch.cat((e8, local_embeddings), 1)
embedded = embedded.view(embedded.size(0), -1)
embedded = torch.cat((embedded, z), 1)
output = self.decoder.forward(embedded, enc_layers)
return output
class Encoder(nn.Module):
def __init__(self, input_nc, ngf=64):
super(Encoder, self).__init__()
self.generator_dim = ngf
self.encoder = nn.Sequential(
nn.Conv2d(input_nc, self.generator_dim, kernel_size = 4, stride = 2, padding = 1),
self.encoder_layer(self.generator_dim, self.generator_dim * 2),
self.encoder_layer(self.generator_dim * 2, self.generator_dim * 4),
self.encoder_layer(self.generator_dim * 4, self.generator_dim * 8),
self.encoder_layer(self.generator_dim * 8, self.generator_dim * 8),
self.encoder_layer(self.generator_dim * 8, self.generator_dim * 8),
self.encoder_layer(self.generator_dim * 8, self.generator_dim * 8),
self.last_layer(self.generator_dim * 8, self.generator_dim * 8),
)
self.linear = nn.Linear(512, 64)
def last_layer(self, input_nc, output_nc):
encoder_layer = nn.Sequential(
nn.ReLU(),
nn.Conv2d(input_nc, output_nc, kernel_size = 4, stride = 2, padding = 1),
)
return encoder_layer
def encoder_layer(self, input_nc, output_nc):
encoder_layer = nn.Sequential(
nn.ReLU(),
nn.Conv2d(input_nc, output_nc, kernel_size = 4, stride = 2, padding = 1),
nn.BatchNorm2d(output_nc)
)
return encoder_layer
def forward(self, image):
output = self.encoder(image)
output = output.view(output.size(0), -1)
mu = self.linear(output)
logvar = self.linear(output)
return mu, logvar
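# Illustrative shape sketch: for 256x256 inputs the encoder output flattens to 512
# features, matching nn.Linear(512, 64). Note that mu and logvar are produced by the
# same linear layer above, so the two returned tensors are identical.
#
#     E = Encoder(input_nc=3, ngf=64)
#     mu, logvar = E(torch.randn(2, 3, 256, 256))   # each -> torch.Size([2, 64])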
| 45.371565 | 167 | 0.620286 |
6483bb6c073f05396f7375026dd8559266381bd8
| 13,995 |
py
|
Python
|
content/tests/test_views.py
|
uktrade/great-domestic-ui
|
e4c1e4783d7321e170ecb6fd5f9eb6c30cd21f4c
|
[
"MIT"
] | null | null | null |
content/tests/test_views.py
|
uktrade/great-domestic-ui
|
e4c1e4783d7321e170ecb6fd5f9eb6c30cd21f4c
|
[
"MIT"
] | 369 |
2019-02-18T15:53:55.000Z
|
2021-06-09T13:17:37.000Z
|
content/tests/test_views.py
|
uktrade/great-domestic-ui
|
e4c1e4783d7321e170ecb6fd5f9eb6c30cd21f4c
|
[
"MIT"
] | 3 |
2019-03-11T12:04:22.000Z
|
2020-11-12T15:28:13.000Z
|
from unittest import mock
import pytest
from unittest.mock import call, patch
from django.urls import reverse
from core.tests.helpers import create_response
from content.views import MarketsPageView
@pytest.fixture
def mock_get_page():
stub = patch('directory_cms_client.client.cms_api_client.lookup_by_slug', return_value=create_response())
yield stub.start()
stub.stop()
markets_pages = [
(
'TopicLandingPage',
'/markets/'
),
(
'CountryGuidePage',
'/markets/australia/'
)
]
def test_community_article_view(mock_get_page, client):
mock_get_page.return_value = create_response({
"meta": {"slug": "foo"},
"title": "Community article",
"page_type": "ArticlePage",
"tree_based_breadcrumbs": [
{"url": "/advice/", "title": "Topic title"},
{"url": "/advice/create-an-export-plan/", "title": "List title"},
{"url": (
"/advice/create-an-export-plan/how-to-write-an-export-plan/"),
"title": "How to write an export plan"},
]
})
url = reverse('community-article')
response = client.get(url)
assert response.status_code == 200
assert mock_get_page.call_count == 1
assert mock_get_page.call_args == call(
draft_token=None,
language_code='en-gb',
slug='community',
)
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
def test_get_country_guide_page_attaches_array_lengths(mock_get_page, client):
page = {
'title': 'test',
'page_type': 'CountryGuidePage',
'tree_based_breadcrumbs': [
{'url': '/markets/', 'title': 'Markets'},
{'url': '/markets/japan/', 'title': 'Japan'},
],
'heading': 'Heading',
'statistics': [
{'number': '1'},
{'number': '2', 'heading': 'heading'},
{'number': None, 'heading': 'no-number-stat'}
],
'accordions': [
{
'title': 'title',
'teaser': 'teaser',
'statistics': [
{'number': '1'},
{'number': '2', 'heading': 'heading'},
{'number': '3', 'heading': 'heading2'},
{'number': None, 'heading': 'no-number-stat'}
],
'subsections': [
{'heading': 'heading'},
{'heading': 'heading-with-teaser', 'teaser': 'teaser'},
{'heading': 'heading-with-teaser-2', 'teaser': 'teaser2'},
{'heading': None, 'teaser': 'teaser-without-heading'}
],
'case_study': {'title': 'title', 'image': 'image'}
}
],
'fact_sheet': {
'columns': [
{'title': 'title'},
{'title': 'title-with-teaser', 'teaser': 'teaser'},
{'title': None, 'teaser': 'teaser-without-title'}
]
}
}
mock_get_page.return_value = create_response(page)
url = reverse(
'country-guide',
kwargs={'slug': 'japan'}
)
response = client.get(url)
view = response.context_data['view']
assert view.num_of_statistics == 2
accordions = response.context_data['page']['accordions']
assert accordions[0]['num_of_statistics'] == 3
assert accordions[0]['num_of_subsections'] == 3
assert response.context_data['page']['fact_sheet']['num_of_columns'] == 2
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
def test_get_country_guide_page_viable_accordion(
mock_get_page,
client
):
viable_accordion = {
'statistics': [],
'title': 'title',
'teaser': 'teaser',
'subsections': [
{
'heading': 'heading1'
},
{
'heading': 'heading2'
}
],
'ctas': [
{
'link': 'link1'
},
{
'link': 'link2'
}
],
'case_study': {'title': 'title', 'image': 'image'}
}
page = {
'title': 'test',
'page_type': 'CountryGuidePage',
'tree_based_breadcrumbs': [
{'url': '/markets/', 'title': 'Markets'},
{'url': '/markets/japan/', 'title': 'Japan'},
],
'heading': 'Heading',
'statistics': [],
'accordions': [viable_accordion],
'fact_sheet': {
'columns': []
}
}
mock_get_page.return_value = create_response(page)
url = reverse(
'country-guide',
kwargs={'slug': 'japan'}
)
response = client.get(url)
accordions = response.context_data['page']['accordions']
assert bool(accordions[0]['is_viable']) is True
non_viable_accordions = [
{
'statistics': [],
'title': '',
'teaser': 'teaser',
'subsections': [
{
'heading': 'heading1'
},
{
'heading': 'heading2'
}
],
'case_study': {'title': 'title', 'image': 'image'}
},
{
'statistics': [],
'title': 'title',
'teaser': '',
'subsections': [
{
'heading': 'heading1'
},
{
'heading': 'heading2'
}
],
'case_study': {'title': 'title', 'image': 'image'}
},
{
'statistics': [],
'title': 'title',
'teaser': 'teaser',
'subsections': [
{
'heading': 'heading1'
}
],
'case_study': {'title': 'title', 'image': 'image'}
},
]
@pytest.mark.parametrize('non_viable_accordion', non_viable_accordions)
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
def test_get_country_guide_page_non_viable_accordion(
mock_get_page, non_viable_accordion, client
):
page = {
'title': 'test',
'page_type': 'CountryGuidePage',
'tree_based_breadcrumbs': [
{'url': '/markets/', 'title': 'Markets'},
{'url': '/markets/japan/', 'title': 'Japan'},
],
'heading': 'Heading',
'statistics': [],
'accordions': [non_viable_accordion],
'fact_sheet': {
'columns': []
}
}
mock_get_page.return_value = create_response(page)
url = reverse(
'country-guide',
kwargs={'slug': 'japan'}
)
response = client.get(url)
accordions = response.context_data['page']['accordions']
assert bool(accordions[0]['is_viable']) is False
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
def test_get_country_guide_page_viable_case_study(mock_get_page, client):
page = {
'title': 'test',
'page_type': 'CountryGuidePage',
'tree_based_breadcrumbs': [
{'url': '/markets/', 'title': 'Markets'},
{'url': '/markets/japan/', 'title': 'Japan'},
],
'heading': 'Heading',
'statistics': [],
'accordions': [{
'case_study': {
'title': 'Case study title',
'image': 'Case study image'
},
'statistics': [],
'title': 'title',
'teaser': 'teaser',
'subsections': [],
'ctas': []
}],
'fact_sheet': {
'columns': []
}
}
mock_get_page.return_value = create_response(page)
url = reverse(
'country-guide',
kwargs={'slug': 'japan'}
)
response = client.get(url)
case_study = response.context_data['page']['accordions'][0]['case_study']
assert bool(case_study['is_viable']) is True
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
def test_get_country_guide_page_neither_case_study_nor_statistics(
mock_get_page,
client
):
page = {
'title': 'test',
'page_type': 'CountryGuidePage',
'tree_based_breadcrumbs': [
{'url': '/markets/', 'title': 'Markets'},
{'url': '/markets/japan/', 'title': 'Japan'},
],
'heading': 'Heading',
'statistics': [],
'accordions': [{
'case_study': {
'title': '',
'image': 'Case study image'
},
'statistics': [],
'title': 'title',
'teaser': 'teaser',
'subsections': [],
'ctas': []
}],
'fact_sheet': {
'columns': []
}
}
mock_get_page.return_value = create_response(page)
url = reverse(
'country-guide',
kwargs={'slug': 'japan'}
)
response = client.get(url)
accordion = response.context_data['page']['accordions'][0]
assert bool(accordion['neither_case_study_nor_statistics']) is True
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
@patch('directory_cms_client.client.cms_api_client.lookup_country_guides')
@patch('directory_cms_client.client.cms_api_client.list_industry_tags', mock.MagicMock())
@patch('directory_cms_client.client.cms_api_client.list_regions', mock.MagicMock())
def test_markets_page_filters(mock_countries, mock_page, rf):
page = {
'title': 'test',
'page_type': 'TopicLandingPage',
'tree_based_breadcrumbs': [
{'url': '/', 'title': 'great.gov.uk'},
{'url': '/markets/', 'title': 'Markets'},
],
'child_pages': [
{
'title': 'Brazil',
'tags': [{'name': 'Aerospace'}]
},
{
'title': 'China',
'tags': [{'name': 'Technology'}]
},
{
'title': 'India',
'tags': [{'name': 'Aerospace'}]
},
{
'title': 'Japan',
'tags': [{'name': 'Aerospace'}]
}
]
}
mock_page.return_value = create_response(page)
filtered_countries = [
{
'title': 'Brazil',
'tags': [{'name': 'Aerospace'}]
},
{
'title': 'Japan',
'tags': [{'name': 'Aerospace'}]
}
]
mock_countries.return_value = create_response(filtered_countries)
request = rf.get('/markets/', {'sector': 'Aerospace'})
response = MarketsPageView.as_view()(request, slug='markets')
response_content = str(response.render().content)
assert response.status_code == 200
assert 'Aerospace' in response_content
assert response.context_data['pagination_page'].object_list == filtered_countries
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
@patch('directory_cms_client.client.cms_api_client.lookup_country_guides')
def test_markets_page_filters_remove_title_prefix_from_sort(mock_countries, mock_page):
page = {
'title': 'test',
'page_type': 'TopicLandingPage',
'tree_based_breadcrumbs': [
{'url': '/', 'title': 'great.gov.uk'},
{'url': '/markets/', 'title': 'Markets'},
],
'child_pages': [{'title': 'Japan'}, {'title': 'Brazil'}, {'title': 'China'}, {'title': 'The Baltics'}],
}
sorted_child_pages = sorted(page['child_pages'], key=lambda x: x['title'].replace('The ', ''))
mock_page.return_value = create_response(page)
mock_countries.return_value = create_response({})
assert sorted_child_pages[0]['title'] == 'The Baltics'
assert sorted_child_pages[1]['title'] == 'Brazil'
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
@patch('directory_cms_client.client.cms_api_client.lookup_country_guides')
@patch('directory_cms_client.client.cms_api_client.list_industry_tags', mock.MagicMock())
@patch('directory_cms_client.client.cms_api_client.list_regions', mock.MagicMock())
def test_markets_page_filters_sort_by_title(mock_countries, mock_page, rf):
page = {
'title': 'test',
'page_type': 'TopicLandingPage',
'tree_based_breadcrumbs': [
{'url': '/', 'title': 'great.gov.uk'},
{'url': '/markets/', 'title': 'Markets'},
],
'child_pages': [{'title': 'India'}, {'title': 'Japan'}, {'title': 'Brazil'}, {'title': 'China'}],
}
sorted_child_pages = sorted(page['child_pages'], key=lambda x: x['title'])
mock_page.return_value = create_response(page)
mock_countries.return_value = create_response(sorted_child_pages)
request = rf.get('/markets/', {'sector': '', 'sortby': 'title'})
response = MarketsPageView.as_view()(request, slug='markets')
assert response.status_code == 200
assert response.context_data['pagination_page'].object_list == sorted_child_pages
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
@patch('directory_cms_client.client.cms_api_client.lookup_country_guides')
@patch('directory_cms_client.client.cms_api_client.list_industry_tags', mock.MagicMock())
@patch('directory_cms_client.client.cms_api_client.list_regions', mock.MagicMock())
def test_markets_page_filters_no_results(mock_countries, mock_page, rf):
page = {
'title': 'test',
'page_type': 'TopicLandingPage',
'tree_based_breadcrumbs': [
{'url': '/', 'title': 'great.gov.uk'},
{'url': '/markets/', 'title': 'Markets'},
],
'child_pages': [],
}
mock_page.return_value = create_response(page)
mock_countries.return_value = create_response(page['child_pages'])
request = rf.get('/markets/', {'sector': '', 'sortby': 'title'})
response = MarketsPageView.as_view()(request, slug='markets')
response_content = str(response.render().content)
assert response.status_code == 200
assert 'sort by' not in response_content
assert len(response.context_data['pagination_page'].object_list) == 0
| 30.490196 | 111 | 0.550411 |
e8aac9cfa0d850d20c8399e80ceee70cbf82de6f
| 3,091 |
py
|
Python
|
plantcv/plantcv/morphology/segment_angle.py
|
Howzit123/plantcv
|
b4ff6ad765da36353f40827ce3816b33d1d3596a
|
[
"MIT"
] | 1 |
2021-02-25T16:57:45.000Z
|
2021-02-25T16:57:45.000Z
|
plantcv/plantcv/morphology/segment_angle.py
|
Howzit123/plantcv
|
b4ff6ad765da36353f40827ce3816b33d1d3596a
|
[
"MIT"
] | null | null | null |
plantcv/plantcv/morphology/segment_angle.py
|
Howzit123/plantcv
|
b4ff6ad765da36353f40827ce3816b33d1d3596a
|
[
"MIT"
] | null | null | null |
# Find angles in degrees of skeleton segments
import os
import cv2
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import color_palette
def segment_angle(segmented_img, objects):
""" Calculate angle of segments (in degrees) by fitting a linear regression line to segments.
Inputs:
segmented_img = Segmented image to plot slope lines and angles on
objects = List of contours
Returns:
labeled_img = Segmented debugging image with angles labeled
:param segmented_img: numpy.ndarray
:param objects: list
:return labeled_img: numpy.ndarray
"""
label_coord_x = []
label_coord_y = []
segment_angles = []
labeled_img = segmented_img.copy()
# Use a previously saved color scale if available
rand_color = color_palette(num=len(objects), saved=True)
for i, cnt in enumerate(objects):
# Find bounds for regression lines to get drawn
rect = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rect)
df = pd.DataFrame(pts, columns=('x', 'y'))
x_max = int(df['x'].max())
x_min = int(df['x'].min())
# Find line fit to each segment
[vx, vy, x, y] = cv2.fitLine(objects[i], cv2.DIST_L2, 0, 0.01, 0.01)
slope = -vy / vx
left_list = int(((x - x_min) * slope) + y)
right_list = int(((x - x_max) * slope) + y)
if slope > 1000000 or slope < -1000000:
print("Slope of contour with ID#", i, "is", slope, "and cannot be plotted.")
else:
# Draw slope lines
cv2.line(labeled_img, (x_max - 1, right_list), (x_min, left_list), rand_color[i], 1)
# Store coordinates for labels
label_coord_x.append(objects[i][0][0][0])
label_coord_y.append(objects[i][0][0][1])
# Calculate degrees from slopes
segment_angles.append(np.arctan(slope[0]) * 180 / np.pi)
segment_ids = []
for i, cnt in enumerate(objects):
# Label slope lines
w = label_coord_x[i]
h = label_coord_y[i]
text = "{:.2f}".format(segment_angles[i])
cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
# segment_label = "ID" + str(i)
segment_ids.append(i)
outputs.add_observation(variable='segment_angle', trait='segment angle',
method='plantcv.plantcv.morphology.segment_angle', scale='degrees', datatype=list,
value=segment_angles, label=segment_ids)
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented_angles.png'))
elif params.debug == 'plot':
plot_image(labeled_img)
return labeled_img
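# Illustrative usage sketch (assuming the usual plantcv morphology workflow, where
# segment_skeleton() supplies both the debugging image and the segment contours):
#
#     from plantcv import plantcv as pcv
#     skeleton = pcv.morphology.skeletonize(mask=binary_mask)
#     segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
#     labeled_img = segment_angle(segmented_img=segmented_img, objects=seg_objects)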
| 34.344444 | 113 | 0.630864 |
4a8a8e611340b041e0f695ebcee9f634c1d67189
| 373 |
py
|
Python
|
src/week5/week5a_quizz_question8.py
|
hemmerling/python-coursera2012
|
1828dc26c196dbd7d4a20d207e996bd8ce99c525
|
[
"Apache-2.0"
] | null | null | null |
src/week5/week5a_quizz_question8.py
|
hemmerling/python-coursera2012
|
1828dc26c196dbd7d4a20d207e996bd8ce99c525
|
[
"Apache-2.0"
] | null | null | null |
src/week5/week5a_quizz_question8.py
|
hemmerling/python-coursera2012
|
1828dc26c196dbd7d4a20d207e996bd8ce99c525
|
[
"Apache-2.0"
] | null | null | null |
def is_ascending(numbers):
"""Returns whether the given list of numbers is in ascending order."""
for i in range(len(numbers) - 1):
if numbers[i+1] < numbers[i]:
return False
return True
global_numbers = [2, 6, 9, 12, 400]
print is_ascending(global_numbers)
global_numbers = [4, 8, 2, 13 ]
print is_ascending(global_numbers)
| 26.642857 | 75 | 0.643432 |
706cffe80a2533e73684e333a8a33826ea256374
| 4,885 |
py
|
Python
|
Plots/MapProjections/NCL_sat_2.py
|
learn2free/GeoCAT-examples
|
3ac152a767e78a362a8ebb6f677005f3de320ca6
|
[
"Apache-2.0"
] | 1 |
2021-05-09T02:54:10.000Z
|
2021-05-09T02:54:10.000Z
|
Plots/MapProjections/NCL_sat_2.py
|
learn2free/GeoCAT-examples
|
3ac152a767e78a362a8ebb6f677005f3de320ca6
|
[
"Apache-2.0"
] | null | null | null |
Plots/MapProjections/NCL_sat_2.py
|
learn2free/GeoCAT-examples
|
3ac152a767e78a362a8ebb6f677005f3de320ca6
|
[
"Apache-2.0"
] | null | null | null |
"""
NCL_sat_2.py
===============
This script illustrates the following concepts:
- Converting float data into short data
- Drawing filled contours over a satellite map
- Explicitly setting contour fill colors
- Finding local high pressure values
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/sat_2.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/sat_2_lg.png
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import geocat.datafiles as gdf
import geocat.viz.util as gvutil
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
###############################################################################
# Import packages:
import xarray as xr
from matplotlib import colors
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and
# load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/slp.1963.nc"), decode_times=False)
# Get data from the 21st timestep
pressure = ds.slp[21, :, :]
# Convert the data values to 32-bit floats (the original NCL example converts to short)
pressure = pressure.astype('float32')
# Convert Pa to hPa data
pressure = pressure * 0.01
# Fix the artifact of not-shown-data around 0 and 360-degree longitudes
wrap_pressure = gvutil.xr_add_cyclic_longitudes(pressure, "lon")
###############################################################################
# Create plot
# Set figure size
fig = plt.figure(figsize=(8, 8))
# Set global axes with an orthographic projection
proj = ccrs.Orthographic(central_longitude=270, central_latitude=45)
ax = plt.axes(projection=proj)
ax.set_global()
# Add land, coastlines, and ocean features
ax.add_feature(cfeature.LAND, facecolor='lightgray', zorder=1)
ax.add_feature(cfeature.COASTLINE, linewidth=.3, zorder=2)
ax.add_feature(cfeature.OCEAN, facecolor='white')
ax.add_feature(cfeature.BORDERS, linewidth=.3)
ax.add_feature(cfeature.LAKES,
facecolor='white',
edgecolor='black',
linewidth=.3)
# Create color map
colorvalues = [1020, 1036, 1500]
cmap = colors.ListedColormap(['None', 'lightgray', 'dimgrey'])
norm = colors.BoundaryNorm(colorvalues, 2)
# Plot contour data
p = wrap_pressure.plot.contourf(ax=ax,
zorder=2,
transform=ccrs.PlateCarree(),
levels=30,
cmap=cmap,
norm=norm,
add_labels=False,
add_colorbar=False)
p = wrap_pressure.plot.contour(ax=ax,
transform=ccrs.PlateCarree(),
linewidths=0.3,
levels=30,
cmap='black',
add_labels=False)
# Find local low- and high-pressure extrema; these will be plotted
# as subscripts to 'L' and 'H' symbols.
lowClevels = gvutil.findLocalExtrema(pressure, lowVal=995, eType='Low')
highClevels = gvutil.findLocalExtrema(pressure, highVal=1042, eType='High')
# Label regular contours with automatic matplotlib labeling
# Specify the levels to label every other contour level
ax.clabel(p,
levels=np.arange(956, 1064, 8),
inline=True,
fontsize=12,
colors='black',
fmt="%.0f")
# Label low and high contours
gvutil.plotELabels(wrap_pressure,
ccrs.Geodetic(),
proj,
clabel_locations=lowClevels,
label='L')
gvutil.plotELabels(wrap_pressure,
ccrs.Geodetic(),
proj,
clabel_locations=highClevels,
label='H')
# Use gvutil function to set title and subtitles
gvutil.set_titles_and_labels(ax,
maintitle=r"$\bf{SLP}$" + " " + r"$\bf{1963,}$" +
" " + r"$\bf{January}$" + " " + r"$\bf{24th}$",
maintitlefontsize=20,
lefttitle="mean Daily Sea Level Pressure",
lefttitlefontsize=16,
righttitle="hPa",
righttitlefontsize=16)
# Set characteristics of text box
props = dict(facecolor='white', edgecolor='black', alpha=0.5)
# Place text box
ax.text(0.40,
-0.1,
'CONTOUR FROM 948 TO 1064 BY 4',
transform=ax.transAxes,
fontsize=16,
bbox=props)
# Add gridlines to axis
gl = ax.gridlines(color='gray', linestyle='--')
gl.xlocator = mticker.FixedLocator(np.arange(-180, 180, 20))
gl.ylocator = mticker.FixedLocator(np.arange(-90, 90, 20))
# Make layout tight
plt.tight_layout()
plt.show()
| 33.923611 | 82 | 0.581781 |
24bc41244045ec9e45ad94208d6544ca46f8d4c9
| 7,815 |
py
|
Python
|
project/eeg_pca.py
|
boredStats/eeg-machine-learning
|
bbf72ef55644f3941120f8f9007d839c1a4731fd
|
[
"MIT"
] | 1 |
2020-07-31T11:38:53.000Z
|
2020-07-31T11:38:53.000Z
|
project/eeg_pca.py
|
boredStats/eeg-machine-learning
|
bbf72ef55644f3941120f8f9007d839c1a4731fd
|
[
"MIT"
] | null | null | null |
project/eeg_pca.py
|
boredStats/eeg-machine-learning
|
bbf72ef55644f3941120f8f9007d839c1a4731fd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Running PCAs on connectivity data."""
import os
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import proj_utils as pu
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.model_selection import cross_val_score
from sklearn.utils import resample
from sklearn.preprocessing import StandardScaler, RobustScaler
pd.options.display.float_format = '{:.3f}'.format
def pretty_pca_res(p):
# Return pca results in a nicer way
cols = [
'Singular values',
'Explained variance',
'Explained variance ratio',
]
df = pd.DataFrame(columns=cols)
df['Singular values'] = p.singular_values_
df['Explained variance'] = p.explained_variance_
df['Explained variance ratio'] = p.explained_variance_ratio_ * 100
cumulative_variance_ratio = []
i = 0
for comp in df['Explained variance ratio']:
i += comp
cumulative_variance_ratio.append(i)
df['Cumulative variance ratio'] = cumulative_variance_ratio
return df
def find_n_components(data, max_n_components=None, step=1):
# Use cross-validation to find best number of components to use
if max_n_components is None:
if pu.df_or_np(data):
max_n_components = data.values.shape[1]
else:
max_n_components = data.shape[1]
scores = []
pca = PCA()
for n in np.arange(1, max_n_components, step):
pca.n_components = n
scores.append(np.mean(cross_val_score(pca, data, cv=3)))
df = pd.DataFrame(columns=['Cross validation scores'])
df['Cross validation scores'] = scores
print(df.head())
return df
def reconstruct_pca(data):
# Reconstruct data after removing the first component
# https://bit.ly/2rGNlXn
X = data.values
mu = np.mean(X, axis=0)
pca = PCA()
pca.fit(X)
raw_num_comp = pca.n_components_
Xhat = np.dot(
pca.transform(X)[:, 1:raw_num_comp],
pca.components_[1:raw_num_comp, :]
)
Xhat += mu
return Xhat
def plot_scree(pretty_res, percent=True, pvals=None, kaiser=False, fname=None):
# Create a scree plot using pretty_pca_res output
mpl.rcParams.update(mpl.rcParamsDefault)
eigs = pretty_res['Singular values'].values
percent_var = pretty_res['Explained variance ratio'].values
if len(eigs) > 30:
n_comp_to_plot = 30
eigs = eigs[:n_comp_to_plot]
percent_var = percent_var[:n_comp_to_plot]
fig, ax = plt.subplots(figsize=(10, 10))
ax.set_title("Scree plot", fontsize='xx-large')
ax.plot(np.arange(1, len(eigs) + 1), eigs, 'ok')
ax.set_ylim([0, (max(eigs) * 1.2)])
ax.set_ylabel('Eigenvalues', fontsize='xx-large')
ax.set_xlabel('Principal Components', fontsize='xx-large')
if percent:
ax2 = ax.twinx()
ax2.plot(np.arange(1, len(percent_var) + 1), percent_var, '-k')
ax2.set_ylim(0, max(percent_var) * 1.2)
ax2.set_ylabel('Percentage of variance explained', fontsize='xx-large')
if pvals is not None and len(pvals) == len(eigs):
# TO-DO: add p<.05 legend?
p_check = [i for i, t in enumerate(pvals) if t < .05]
eigen_check = [e for i, e in enumerate(eigs) for j in p_check if i == j]
ax.plot(np.add(p_check, 1), eigen_check, 'ob', markersize=12)
if kaiser:
ax.axhline(1, color='k', linestyle=':', linewidth=2)
if fname:
fig.savefig(fname, bbox_inches='tight')
else:
plt.show()
# return fig, ax, ax2
def perm_pca(data, n_iters=1000, n_components=None):
if n_iters == 0:
pass
if n_components is None:
n_components = np.min(data.shape)
n = 0
permutation_dataframes = {}
while n != n_iters:
n += 1
print('Running permutation PCA - Iteration %04d' % n)
permuted_dataset = resample(data, replace=False)
pca = IncrementalPCA(n_components=n_components, whiten=True)
pca.fit(permuted_dataset)
perm_df = pretty_pca_res(pca)
permutation_dataframes['Permutation %04d' % n] = perm_df
del pca
return permutation_dataframes
def p_from_perm_data(observed_df, perm_data, variable='Singular values'):
perm_results = {}
for perm in perm_data:
p_df = perm_data[perm]
perm_results[perm] = p_df[variable].values
p_values = []
for i, perm in enumerate(perm_results):
perm_array = perm_results[perm]
observed = observed_df.iloc[i][variable]
p = permutation_p(observed, perm_array)
p_values.append(p)
return p_values
def permutation_p(observed, perm_array):
"""Non-parametric null hypothesis testing
see Phipson & Smyth 2010 for more information
"""
n_iters = len(perm_array)
n_hits = np.where(np.abs(perm_array) >= np.abs(observed))
return (len(n_hits[0]) + 1) / (n_iters + 1)
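# Illustrative worked example: with 1000 permuted statistics of which 12 are at least
# as extreme (in absolute value) as the observed value, the p-value is
# (12 + 1) / (1000 + 1) ~= 0.013. A hypothetical call:
#
#     p = permutation_p(observed=2.5, perm_array=np.random.normal(size=1000))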
def split_connectivity_by_band(connectivity_df):
bands = ['delta', 'theta', 'alpha', 'beta', 'gamma']
colnames = list(connectivity_df)
connectivity_by_band = {}
for band in bands:
band_columns = [c for c in colnames if band in c]
band_df = connectivity_df[band_columns]
connectivity_by_band[band] = band_df
return connectivity_by_band
def pca_by_band(data, n_iters=1000, res_dir=None):
if res_dir is None:
res_dir = os.path.dirname(__file__)
conn_by_band = split_connectivity_by_band(data)
band_results = {}
for b in conn_by_band:
band_df = conn_by_band[b]
print(pu.ctime() + 'Running PCA on %s' % b)
# scaled_data = norm_to_ss1(band_df.values)
# scaled_data = RobustScaler().fit_transform(band_df.values)
scaled_data = StandardScaler().fit_transform(band_df)
pca = PCA(.97)
pca.fit(scaled_data)
band_res = pretty_pca_res(pca)
band_results[b] = band_res
print(pca.n_components_)
del pca
perm_res = perm_pca(data=band_df, n_iters=n_iters)
p_values = p_from_perm_data(observed_df=band_res, perm_data=perm_res)
plot_scree(
band_res,
pvals=p_values,
percent=False,
fname=os.path.join(res_dir, '%s_pca_scree.png' % b)
)
band_res.to_excel(os.path.join(res_dir, '%s_pca_res.xlsx' % b))
def norm_to_ss1(matrix):
# Alternate method for scaling, see Abdi & Williams, 2010 (PLS methods)
centered = matrix - np.mean(matrix, axis=0)
sum_of_squares = np.sum(centered ** 2, axis=0)
rescaled_matrix = np.ndarray(shape=matrix.shape)
for i, ss in enumerate(sum_of_squares):
rescaled_matrix[:, i] = centered[:, i] / np.sqrt(ss)
return rescaled_matrix
def _test_cross_val_score_method(data):
pca = PCA(n_components=5, whiten=True)
pca.fit(data)
scores = pca.score_samples(data.values)
print(scores.shape)
score_df = find_n_components(data, step=5)
score_df.to_excel('./pca_cross_val_scores_test.xlsx')
def grand_pca(data, res_dir=None):
if res_dir is None:
res_dir = os.path.dirname(__file__)
print(pu.ctime() + 'Running grand PCA')
pca = PCA(n_components=.99, whiten=True)
zdata = StandardScaler().fit_transform(data)
pca.fit(zdata)
print(pca.n_components_)
true_df = pretty_pca_res(pca)
plot_scree(
true_df,
percent=False,
fname=os.path.join(res_dir, 'grand_pca_scree.png'))
true_df.to_excel(os.path.join(res_dir, 'grand_pca_res.xlsx'))
if __name__ == "__main__":
print(pu.ctime() + 'Loading data')
data = pu.load_connectivity_data()
res_dir = os.path.abspath('./../results/pca')
if not os.path.isdir(res_dir):
os.mkdir(res_dir)
grand_pca(data)
pca_by_band(data, n_iters=0, res_dir=res_dir)
| 30.173745 | 80 | 0.654511 |
2961e7bc3c5b2709b1402ca5f1d1b06e704e5b35
| 1,406 |
py
|
Python
|
src/emit_started_event.py
|
duyhoang15/test
|
75a22b77adf8f582b40595654cb7200da32b700e
|
[
"MIT-0"
] | 8 |
2021-02-04T13:16:01.000Z
|
2021-09-06T21:38:04.000Z
|
src/emit_started_event.py
|
duyhoang15/test
|
75a22b77adf8f582b40595654cb7200da32b700e
|
[
"MIT-0"
] | null | null | null |
src/emit_started_event.py
|
duyhoang15/test
|
75a22b77adf8f582b40595654cb7200da32b700e
|
[
"MIT-0"
] | 12 |
2021-02-04T17:49:03.000Z
|
2022-03-07T18:30:02.000Z
|
import sys
import boto3 # type: ignore
import logging
import json
from awsglue.utils import getResolvedOptions # type: ignore
# Setup Logging
def setup_logger(log_level):
log_msg_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
log_datetime_format = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(format=log_msg_format, datefmt=log_datetime_format)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
return logger
# Read params from commandline
args = getResolvedOptions(sys.argv, ['WORKFLOW_NAME', 'WORKFLOW_RUN_ID', 'LOG_LEVEL'])
workflow_name = args['WORKFLOW_NAME']
workflow_run_id = args['WORKFLOW_RUN_ID']
log_level = args['LOG_LEVEL']
# Logging
logger = setup_logger(log_level)
logger.info(f"workflowName [{workflow_name}]")
logger.info(f"runId [{workflow_run_id}]")
# Initiate Events client
events = boto3.client('events')
detail = json.dumps({'workflowName': workflow_name, 'runId': workflow_run_id, 'state': 'STARTED'})
# Submit event to PutEvents API
response = events.put_events(
Entries=[
{
'Detail': detail,
'DetailType': 'Glue Workflow State Change',
'Source': 'finance.cur.kpi.pipeline'
}
]
)
response_string = json.dumps(response)
logger.info(f"Response from PutEvents API [{response_string}]")
logger.info("put-event-workflow-started.py: process completed successfully ...")
| 28.12 | 98 | 0.714083 |
5015bde98f851d89920c327262db0dcdc2227ef7
| 2,862 |
py
|
Python
|
tests/dic2owl/test_dic2owl_generator.py
|
emmo-repo/CIF-ontology
|
84142714e7db60c87dbd60aabcdc07102d5d8678
|
[
"CC-BY-4.0"
] | 5 |
2021-04-28T14:07:49.000Z
|
2022-02-17T08:17:06.000Z
|
tests/dic2owl/test_dic2owl_generator.py
|
emmo-repo/CIF-ontology
|
84142714e7db60c87dbd60aabcdc07102d5d8678
|
[
"CC-BY-4.0"
] | 42 |
2021-04-09T12:50:35.000Z
|
2022-03-25T09:31:20.000Z
|
tests/dic2owl/test_dic2owl_generator.py
|
emmo-repo/CIF-ontology
|
84142714e7db60c87dbd60aabcdc07102d5d8678
|
[
"CC-BY-4.0"
] | 2 |
2021-04-28T08:47:35.000Z
|
2021-08-25T12:03:50.000Z
|
"""Test the `dic2owl.dic2owl.Generator` class."""
# pylint: disable=redefined-outer-name,import-outside-toplevel
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
if TYPE_CHECKING:
from typing import Callable, List, Optional
from dic2owl import Generator
@pytest.fixture(scope="session")
def sample_generator_comments() -> "List[str]":
"""The comments to be used for the `sample_generator` fixture."""
return ["This is a test."]
@pytest.fixture
def sample_generator(
base_iri: str, cif_dic_path: Path, sample_generator_comments: "List[str]"
) -> "Callable[[Optional[List[str]]], Generator]":
"""Create a generator similar to what is tested in
`test_initialization()`."""
from dic2owl import Generator
def _sample_generator(comments: "Optional[List[str]]" = None) -> Generator:
"""Create and return a `Generator` with specific list of metadata
comments. By default, the fixture `sample_generator_comments` is
used."""
return Generator(
dicfile=cif_dic_path,
base_iri=base_iri,
comments=sample_generator_comments
if comments is None
else comments,
)
return _sample_generator
def test_initialization(
base_iri: str, cif_dic_path: Path, sample_generator_comments: "List[str]"
) -> None:
"""Ensure a newly initialized Generator has intended ontologies and
properties."""
from CifFile import CifDic
from dic2owl import Generator
cif_dictionary = CifDic(str(cif_dic_path), do_dREL=False)
generator = Generator(
dicfile=cif_dic_path,
base_iri=base_iri,
comments=sample_generator_comments,
)
assert generator
assert generator.dic.WriteOut() == cif_dictionary.WriteOut()
assert generator.ddl
assert generator.ddl in generator.onto.imported_ontologies
assert generator.comments == sample_generator_comments
def test_generate(
cif_ttl: str,
create_location_free_ttl: "Callable[[Path], str]",
sample_generator: "Callable[[Optional[List[str]]], Generator]",
sample_generator_comments: "List[str]",
) -> None:
"""Test the `generate()` method."""
from tempfile import NamedTemporaryFile
generator = sample_generator(None)
generated_ontology = generator.generate()
for comment in sample_generator_comments:
assert comment in generated_ontology.metadata.comment
assert (
f"Generated with dic2owl from {generator.dicfile}"
in generated_ontology.metadata.comment
)
generated_ontology = sample_generator([]).generate()
with NamedTemporaryFile() as output_turtle:
generated_ontology.save(output_turtle.name, format="turtle")
generated_ttl = create_location_free_ttl(Path(output_turtle.name))
assert generated_ttl == cif_ttl
| 30.774194 | 79 | 0.706499 |
142e4ca76f757b49b1efa97d742664729184bbfb
| 34,624 |
py
|
Python
|
nempy/spot_markert_backend/solver_interface.py
|
hy3440/nempy
|
ffc6c3e1a0becde8cbf6ba56d5885768dc1c0a37
|
[
"BSD-3-Clause"
] | 24 |
2020-05-16T11:46:25.000Z
|
2022-03-29T22:25:09.000Z
|
nempy/spot_markert_backend/solver_interface.py
|
hy3440/nempy
|
ffc6c3e1a0becde8cbf6ba56d5885768dc1c0a37
|
[
"BSD-3-Clause"
] | 6 |
2020-11-17T22:37:35.000Z
|
2022-03-03T00:11:08.000Z
|
nempy/spot_markert_backend/solver_interface.py
|
hy3440/nempy
|
ffc6c3e1a0becde8cbf6ba56d5885768dc1c0a37
|
[
"BSD-3-Clause"
] | 12 |
2020-04-30T09:42:22.000Z
|
2022-03-06T23:45:08.000Z
|
import numpy as np
import pandas as pd
from mip import Model, xsum, minimize, CONTINUOUS, OptimizationStatus, BINARY, CBC, GUROBI, LP_Method
class InterfaceToSolver:
"""A wrapper for the mip model class, allows interaction with mip using pd.DataFrames."""
def __init__(self, solver_name='CBC'):
self.variables = {}
self.linear_mip_variables = {}
self.solver_name = solver_name
if solver_name == 'CBC':
self.mip_model = Model("market", solver_name=CBC)
self.linear_mip_model = Model("market", solver_name=CBC)
elif solver_name == 'GUROBI':
self.mip_model = Model("market", solver_name=GUROBI)
self.linear_mip_model = Model("market", solver_name=GUROBI)
else:
raise ValueError("Solver '{}' not recognised.")
self.mip_model.verbose = 0
self.mip_model.solver.set_mip_gap_abs(1e-10)
self.mip_model.solver.set_mip_gap(1e-20)
self.mip_model.lp_method = LP_Method.DUAL
self.linear_mip_model.verbose = 0
self.linear_mip_model.solver.set_mip_gap_abs(1e-10)
self.linear_mip_model.solver.set_mip_gap(1e-20)
self.linear_mip_model.lp_method = LP_Method.DUAL
def add_variables(self, decision_variables):
"""Add decision variables to the model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1],
... 'lower_bound': [0.0, 0.0],
... 'upper_bound': [6.0, 1.0],
... 'type': ['continuous', 'binary']})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
The underlying mip_model should now have 2 variables.
>>> print(si.mip_model.num_cols)
2
The first one should have the following properties.
>>> print(si.mip_model.var_by_name('0').var_type)
C
>>> print(si.mip_model.var_by_name('0').lb)
0.0
>>> print(si.mip_model.var_by_name('0').ub)
6.0
The second one should have the following properties.
>>> print(si.mip_model.var_by_name('1').var_type)
B
>>> print(si.mip_model.var_by_name('1').lb)
0.0
>>> print(si.mip_model.var_by_name('1').ub)
1.0
"""
# Create a mapping between the nempy level names for variable types and the mip representation.
variable_types = {'continuous': CONTINUOUS, 'binary': BINARY}
# Add each variable to the mip model.
for variable_id, lower_bound, upper_bound, variable_type in zip(
list(decision_variables['variable_id']), list(decision_variables['lower_bound']),
list(decision_variables['upper_bound']), list(decision_variables['type'])):
self.variables[variable_id] = self.mip_model.add_var(lb=lower_bound, ub=upper_bound,
var_type=variable_types[variable_type],
name=str(variable_id))
self.linear_mip_variables[variable_id] = self.linear_mip_model.add_var(lb=lower_bound, ub=upper_bound,
var_type=variable_types[
variable_type],
name=str(variable_id))
def add_sos_type_2(self, sos_variables, sos_id_columns, position_column):
"""Add groups of special ordered sets of type 2 two the mip model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> sos_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'sos_id': ['A', 'A', 'A', 'B', 'B', 'B'],
... 'position': [0, 1, 2, 0, 1, 2]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_sos_type_2(sos_variables, 'sos_id', 'position')
"""
# Function that adds sets to mip model.
def add_sos_vars(sos_group):
self.mip_model.add_sos(list(zip(sos_group['vars'], sos_group[position_column])), 2)
# For each variable_id get the variable object from the mip model
sos_variables['vars'] = sos_variables['variable_id'].apply(lambda x: self.variables[x])
# Break up the sets based on their id and add them to the model separately.
sos_variables.groupby(sos_id_columns).apply(add_sos_vars)
# This is a hack to make sure mip knows there are binary constraints.
self.mip_model.add_var(var_type=BINARY, obj=0.0)
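# add_sos_type_1 below mirrors add_sos_type_2 but adds SOS1 sets (at most one
# member of each set may be non-zero), whereas SOS2 allows at most two adjacent
# members to be non-zero; the weights passed to add_sos only order the members.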
def add_sos_type_1(self, sos_variables):
# Function that adds sets to mip model.
def add_sos_vars(sos_group):
self.mip_model.add_sos(list(zip(sos_group['vars'], [1.0 for i in range(len(sos_group['vars']))])), 1)
# For each variable_id get the variable object from the mip model
sos_variables['vars'] = sos_variables['variable_id'].apply(lambda x: self.variables[x])
# Break up the sets based on their id and add them to the model separately.
sos_variables.groupby('sos_id').apply(add_sos_vars)
# This is a hack to make sure mip knows there are binary constraints.
self.mip_model.add_var(var_type=BINARY, obj=0.0)
def add_objective_function(self, objective_function):
"""Add the objective function to the mip model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> objective_function = pd.DataFrame({
... 'variable_id': [0, 1, 3, 4, 5],
... 'cost': [1.0, 2.0, -1.0, 5.0, 0.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_objective_function(objective_function)
>>> print(si.mip_model.var_by_name('0').obj)
1.0
>>> print(si.mip_model.var_by_name('5').obj)
0.0
"""
objective_function = objective_function.sort_values('variable_id')
objective_function = objective_function.set_index('variable_id')
obj = minimize(xsum(objective_function['cost'][i] * self.variables[i] for i in
list(objective_function.index)))
self.mip_model.objective = obj
self.linear_mip_model.objective = obj
def add_constraints(self, constraints_lhs, constraints_type_and_rhs):
"""Add constraints to the mip model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> print(si.mip_model.constr_by_name('1'))
1: +1.0 0 +0.5 1 <= 10.0
>>> print(si.mip_model.constr_by_name('2'))
2: +1.0 3 +2.0 4 = 20.0
"""
constraints_lhs = constraints_lhs.groupby(['constraint_id', 'variable_id'], as_index=False).agg(
{'coefficient': 'sum'})
rows = constraints_lhs.groupby(['constraint_id'], as_index=False)
# Make a dictionary so constraint rhs values can be accessed using the constraint id.
rhs = dict(zip(constraints_type_and_rhs['constraint_id'], constraints_type_and_rhs['rhs']))
# Make a dictionary so constraint type can be accessed using the constraint id.
enq_type = dict(zip(constraints_type_and_rhs['constraint_id'], constraints_type_and_rhs['type']))
var_ids = constraints_lhs['variable_id'].to_numpy()
vars = np.asarray(
[self.variables[k] if k in self.variables.keys() else None for k in range(0, max(var_ids) + 1)])
coefficients = constraints_lhs['coefficient'].to_numpy()
for row_id, row in rows.indices.items():
# Use the variable_ids to get mip variable objects present in the constraints
lhs_variables = vars[var_ids[row]]
# Use the row positions to select the lhs coefficients for this constraint.
lhs = coefficients[row]
# Multiply the variables by their coefficients and sum to create the lhs of the constraint.
exp = lhs_variables * lhs
exp = exp.tolist()
exp = xsum(exp)
# Add based on inequality type.
if enq_type[row_id] == '<=':
new_constraint = exp <= rhs[row_id]
elif enq_type[row_id] == '>=':
new_constraint = exp >= rhs[row_id]
elif enq_type[row_id] == '=':
new_constraint = exp == rhs[row_id]
else:
raise ValueError("Constraint type not recognised should be one of '<=', '>=' or '='.")
self.mip_model.add_constr(new_constraint, name=str(row_id))
self.linear_mip_model.add_constr(new_constraint, name=str(row_id))
def optimize(self):
"""Optimize the mip model.
If an optimal solution cannot be found, constraints are removed one at a time to identify a constraint
causing the infeasibility, and a ValueError is then raised.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.optimize()
>>> decision_variables['value'] = si.get_optimal_values_of_decision_variables(decision_variables)
>>> print(decision_variables)
variable_id lower_bound upper_bound type value
0 0 0.0 5.0 continuous 0.0
1 1 0.0 5.0 continuous 0.0
2 2 0.0 10.0 continuous 0.0
3 3 0.0 10.0 continuous 10.0
4 4 0.0 5.0 continuous 5.0
5 5 0.0 5.0 continuous 0.0
"""
status = self.mip_model.optimize()
if status != OptimizationStatus.OPTIMAL:
# Attempt to find the constraint causing the infeasibility.
print('Model infeasible; attempting to find problem constraint.')
con_index = find_problem_constraint(self.mip_model)
print('Couldn\'t find an optimal solution, but removing con {} fixed INFEASIBILITY'.format(con_index))
raise ValueError('Linear program infeasible')
def get_optimal_values_of_decision_variables(self, variable_definitions):
"""Get the optimal values for each decision variable.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.optimize()
>>> decision_variables['value'] = si.get_optimal_values_of_decision_variables(decision_variables)
>>> print(decision_variables)
variable_id lower_bound upper_bound type value
0 0 0.0 5.0 continuous 0.0
1 1 0.0 5.0 continuous 0.0
2 2 0.0 10.0 continuous 0.0
3 3 0.0 10.0 continuous 10.0
4 4 0.0 5.0 continuous 5.0
5 5 0.0 5.0 continuous 0.0
"""
values = variable_definitions['variable_id'].apply(lambda x: self.mip_model.var_by_name(str(x)).x)
return values
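# The _lin variant below reads the same variables from linear_mip_model, the
# companion model used when pricing constraints, rather than from the main MIP.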
def get_optimal_values_of_decision_variables_lin(self, variable_definitions):
values = variable_definitions['variable_id'].apply(lambda x: self.linear_mip_model.var_by_name(str(x)).x)
return values
def get_slack_in_constraints(self, constraints_type_and_rhs):
"""Get the slack values in each constraint.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.optimize()
>>> constraints_type_and_rhs['slack'] = si.get_slack_in_constraints(constraints_type_and_rhs)
>>> print(constraints_type_and_rhs)
constraint_id type rhs slack
0 1 <= 10.0 10.0
1 2 = 20.0 0.0
"""
slack = constraints_type_and_rhs['constraint_id'].apply(lambda x: self.mip_model.constr_by_name(str(x)).slack)
return slack
def price_constraints(self, constraint_ids_to_price):
"""For each constraint_id find the marginal value of the constraint.
The marginal value is taken from the dual (shadow) price of the constraint in the linear version of the
model, i.e. the increase in the objective function value per unit increase in the constraint's rhs.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> objective_function = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'cost': [1.0, 3.0, 10.0, 8.0, 9.0, 7.0]})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 1, 1],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 1.0, 1.0, 1.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1],
... 'type': ['='],
... 'rhs': [20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.add_objective_function(objective_function)
>>> si.optimize()
>>> si.linear_mip_model.optimize()
<OptimizationStatus.OPTIMAL: 0>
>>> prices = si.price_constraints([1])
>>> print(prices)
{1: 8.0}
>>> decision_variables['value'] = si.get_optimal_values_of_decision_variables(decision_variables)
>>> print(decision_variables)
variable_id lower_bound upper_bound type value
0 0 0.0 5.0 continuous 5.0
1 1 0.0 5.0 continuous 5.0
2 2 0.0 10.0 continuous 0.0
3 3 0.0 10.0 continuous 10.0
4 4 0.0 5.0 continuous 0.0
5 5 0.0 5.0 continuous 0.0
"""
costs = {}
for id in constraint_ids_to_price:
costs[id] = self.linear_mip_model.constr_by_name(str(id)).pi
return costs
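# update_rhs below shifts the rhs of a constraint in the linear model by
# violation_degree; e.g. a hypothetical si.update_rhs(1, 5.0) adds 5.0 to the
# rhs of the constraint named '1'.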
def update_rhs(self, constraint_id, violation_degree):
constraint = self.linear_mip_model.constr_by_name(str(constraint_id))
constraint.rhs += violation_degree
def update_variable_bounds(self, new_bounds):
for variable_id, lb, ub in zip(new_bounds['variable_id'], new_bounds['lower_bound'], new_bounds['upper_bound']):
self.mip_model.var_by_name(str(variable_id)).lb = lb
self.mip_model.var_by_name(str(variable_id)).ub = ub
def disable_variables(self, variables):
for var_id in variables['variable_id']:
var = self.linear_mip_model.var_by_name(str(var_id))
var.lb = 0.0
var.ub = 0.0
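# find_problem_constraint below works on a copy of the model: it removes
# constraints one at a time, re-optimising after each removal, and returns the
# names of the constraints removed up to the point the model became solvable.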
def find_problem_constraint(base_prob):
cons = []
test_prob = base_prob.copy()
for con in [con.name for con in base_prob.constrs]:
[test_prob.remove(c) for c in test_prob.constrs if c.name == con]
status = test_prob.optimize()
cons.append(con)
if status == OptimizationStatus.OPTIMAL:
return cons
return []
def create_lhs(constraints, decision_variables, join_columns):
"""Combine constraints with general definitions of lhs with variables to give an explicit lhs definition.
Both constraints and decision_variables can have a coefficient; the coefficient used in the actual lhs will
be the product of the two coefficients.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'region': ['NSW', 'NSW', 'VIC',
... 'VIC', 'VIC', 'VIC'],
... 'service': ['energy', 'energy','energy',
... 'energy','energy','energy',],
... 'coefficient': [0.9, 0.8, 1.0, 0.95, 1.1, 1.01]})
>>> constraints = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'region': ['NSW', 'VIC'],
... 'service': ['energy', 'energy'],
... 'coefficient': [1.0, 1.0]})
>>> lhs = create_lhs(constraints, decision_variables, ['region', 'service'])
>>> print(lhs)
constraint_id variable_id coefficient
0 1 0 0.90
1 1 1 0.80
2 2 2 1.00
3 2 3 0.95
4 2 4 1.10
5 2 5 1.01
Parameters
----------
constraints : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
join_columns one or more columns defining the types of variables that should
be on the lhs (as `str`)
coefficient the constraint level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
decision_variables : pd.DataFrame
============= ===============================================================
Columns: Description:
variable_id the unique identifier of the variable (as `np.int64`)
join_columns one or more columns defining the types of variables that should
be on the lhs (as `str`)
coefficient the variable level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
Returns
-------
lhs : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
variable_id the unique identifier of the variable (as `np.int64`)
coefficient the constraint level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
"""
constraints = pd.merge(constraints, decision_variables, 'inner', on=join_columns)
constraints['coefficient'] = constraints['coefficient_x'] * constraints['coefficient_y']
lhs = constraints.loc[:, ['constraint_id', 'variable_id', 'coefficient']]
return lhs
def create_mapping_of_generic_constraint_sets_to_constraint_ids(constraints, market_constraints):
"""Combine generic constraints and fcas market constraints to get the full set of generic constraints.
Returns None if there are no generic or FCAS market constraints.
Examples
--------
>>> constraints = {
... 'generic': pd.DataFrame({
... 'constraint_id': [0, 1],
... 'set': ['A', 'B']})
... }
>>> market_constraints = {
... 'fcas': pd.DataFrame({
... 'constraint_id': [2, 3],
... 'set': ['C', 'D']})
... }
>>> generic_constraints = create_mapping_of_generic_constraint_sets_to_constraint_ids(
... constraints, market_constraints)
>>> print(generic_constraints)
constraint_id set
0 0 A
1 1 B
0 2 C
1 3 D
Parameters
----------
constraints : dict{str : pd.DataFrame}
The pd.DataFrame stored under the key 'generic', if it exists, should have the structure.
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
market_constraints : dict{str : pd.DataFrame}
The pd.DataFrame stored under the key 'fcas', if it exists, should have the structure.
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
Returns
-------
pd.DataFrame or None
If pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
"""
generic_constraints = []
if 'generic' in constraints:
generic_constraints.append(constraints['generic'].loc[:, ['constraint_id', 'set']])
if 'fcas' in market_constraints:
generic_constraints.append(market_constraints['fcas'].loc[:, ['constraint_id', 'set']])
if len(generic_constraints) > 0:
return pd.concat(generic_constraints)
else:
return None
def create_unit_level_generic_constraint_lhs(generic_constraint_units, generic_constraint_ids,
unit_bids_to_constraint_map):
"""Find the lhs variables from units for generic constraints.
Examples
--------
>>> generic_constraint_units = pd.DataFrame({
... 'set': ['A', 'A'],
... 'unit': ['X', 'Y'],
... 'service': ['energy', 'energy'],
... 'coefficient': [0.9, 0.8]})
>>> generic_constraint_ids = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'set': ['A', 'B']})
>>> unit_bids_to_constraint_map = pd.DataFrame({
... 'variable_id': [0, 1],
... 'unit': ['X', 'Y'],
... 'service': ['energy', 'energy']})
>>> lhs = create_unit_level_generic_constraint_lhs(generic_constraint_units, generic_constraint_ids,
... unit_bids_to_constraint_map)
>>> print(lhs)
constraint_id variable_id coefficient
0 1 0 0.9
1 1 1 0.8
Parameters
----------
generic_constraint_units : pd.DataFrame
============= ==============================================================
Columns: Description:
set the unique identifier of the constraint set to map the
lhs coefficients to (as `str`)
unit the unit whose variables will be mapped to the lhs (as `str`)
service the service whose variables will be mapped to the lhs (as `str`)
coefficient the lhs coefficient (as `np.float64`)
============= ==============================================================
generic_constraint_ids : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
unit_bids_to_constraint_map : pd.DataFrame
============= =============================================================================
Columns: Description:
variable_id the id of the variable (as `np.int64`)
unit the unit level constraints the variable should map to (as `str`)
service the service type of the constraints the variables should map to (as `str`)
============= =============================================================================
Returns
-------
lhs : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
variable_id the unique identifier of the variable (as `np.int64`)
coefficient the constraint level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
"""
unit_lhs = pd.merge(generic_constraint_units,
unit_bids_to_constraint_map.loc[:, ['unit', 'service', 'variable_id']],
on=['unit', 'service'])
unit_lhs = pd.merge(unit_lhs, generic_constraint_ids.loc[:, ['constraint_id', 'set']], on='set')
return unit_lhs.loc[:, ['constraint_id', 'variable_id', 'coefficient']]
def create_region_level_generic_constraint_lhs(generic_constraint_regions, generic_constraint_ids,
regional_bids_to_constraint_map):
"""Find the lhs variables from regions for generic constraints.
Examples
--------
>>> generic_constraint_regions = pd.DataFrame({
... 'set': ['A'],
... 'region': ['X'],
... 'service': ['energy'],
... 'coefficient': [0.9]})
>>> generic_constraint_ids = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'set': ['A', 'B']})
>>> regional_bids_to_constraint_map = pd.DataFrame({
... 'variable_id': [0, 1],
... 'region': ['X', 'X'],
... 'service': ['energy', 'energy']})
>>> lhs = create_region_level_generic_constraint_lhs(generic_constraint_regions, generic_constraint_ids,
... regional_bids_to_constraint_map)
>>> print(lhs)
constraint_id variable_id coefficient
0 1 0 0.9
1 1 1 0.9
Parameters
----------
generic_constraint_regions : pd.DataFrame
============= ==============================================================
Columns: Description:
set the unique identifier of the constraint set to map the
lhs coefficients to (as `str`)
region the region whose variables will be mapped to the lhs (as `str`)
service the service whose variables will be mapped to the lhs (as `str`)
coefficient the lhs coefficient (as `np.float64`)
============= ==============================================================
generic_constraint_ids : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
regional_bids_to_constraint_map : pd.DataFrame
============= =============================================================================
Columns: Description:
variable_id the id of the variable (as `np.int64`)
region the region level constraints the variable should map to (as `str`)
service the service type of the constraints the variables should map to (as `str`)
============= =============================================================================
Returns
-------
lhs : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
variable_id the unique identifier of the variable (as `np.int64`)
coefficient the constraint level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
"""
region_lhs = pd.merge(generic_constraint_regions,
regional_bids_to_constraint_map.loc[:, ['region', 'service', 'variable_id']],
on=['region', 'service'])
region_lhs = pd.merge(region_lhs, generic_constraint_ids.loc[:, ['constraint_id', 'set']], on='set')
return region_lhs.loc[:, ['constraint_id', 'variable_id', 'coefficient']]
def create_interconnector_generic_constraint_lhs(generic_constraint_interconnectors, generic_constraint_ids,
interconnector_variables):
"""Find the lhs variables from interconnectors for generic constraints.
Examples
--------
>>> generic_constraint_interconnectors = pd.DataFrame({
... 'set': ['A'],
... 'interconnector': ['X'],
... 'coefficient': [0.9]})
>>> generic_constraint_ids = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'set': ['A', 'B']})
>>> interconnector_variables = pd.DataFrame({
... 'variable_id': [0, 1],
... 'interconnector': ['X', 'X'],
... 'generic_constraint_factor': [1, 1]})
>>> lhs = create_interconnector_generic_constraint_lhs(generic_constraint_interconnectors, generic_constraint_ids,
... interconnector_variables)
>>> print(lhs)
constraint_id variable_id coefficient
0 1 0 0.9
1 1 1 0.9
"""
interconnector_lhs = pd.merge(generic_constraint_interconnectors,
interconnector_variables.loc[:, ['interconnector', 'variable_id',
'generic_constraint_factor']],
on=['interconnector'])
interconnector_lhs = pd.merge(interconnector_lhs, generic_constraint_ids.loc[:, ['constraint_id', 'set']], on='set')
interconnector_lhs['coefficient'] = interconnector_lhs['coefficient'] * interconnector_lhs[
'generic_constraint_factor']
return interconnector_lhs.loc[:, ['constraint_id', 'variable_id', 'coefficient']]
| 41.765983 | 120 | 0.521026 |
bf218ee2349f7a63726c31d479b9661ac719c84a
| 2,237 |
py
|
Python
|
epytope/Data/pssms/comblibsidney/mat/B_1501_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7 |
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/comblibsidney/mat/B_1501_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22 |
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/comblibsidney/mat/B_1501_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4 |
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
B_1501_9 = {0: {'A': -0.31, 'C': -0.19, 'E': -0.16, 'D': -0.042, 'G': -0.43, 'F': -0.59, 'I': -1.0, 'H': -0.59, 'K': -0.39, 'M': -0.89, 'L': -0.61, 'N': -0.22, 'Q': -0.25, 'P': -0.059, 'S': -0.42, 'R': -0.67, 'T': -0.13, 'W': -0.44, 'V': -0.14, 'Y': -0.33}, 1: {'A': -0.57, 'C': -0.027, 'E': -0.2, 'D': -0.0094, 'G': -0.095, 'F': -0.099, 'I': -0.42, 'H': -0.015, 'K': -0.0091, 'M': -0.96, 'L': -0.52, 'N': -0.052, 'Q': -1.0, 'P': -0.015, 'S': -0.59, 'R': -0.024, 'T': -0.29, 'W': -0.027, 'V': -0.39, 'Y': -0.028}, 2: {'A': -0.23, 'C': -0.35, 'E': -0.04, 'D': -0.039, 'G': -0.14, 'F': -0.83, 'I': -1.0, 'H': -0.68, 'K': -0.53, 'M': -0.77, 'L': -0.18, 'N': -0.46, 'Q': -0.3, 'P': -0.32, 'S': -0.41, 'R': -0.69, 'T': -0.12, 'W': -0.16, 'V': -0.14, 'Y': -0.6}, 3: {'A': -0.4, 'C': -0.39, 'E': -0.27, 'D': -0.3, 'G': -0.23, 'F': -0.42, 'I': -0.27, 'H': -0.19, 'K': -0.43, 'M': -0.56, 'L': -0.56, 'N': -0.46, 'Q': -0.96, 'P': -0.22, 'S': -1.0, 'R': -0.28, 'T': -0.39, 'W': -0.39, 'V': -0.52, 'Y': -0.41}, 4: {'A': -0.64, 'C': -0.87, 'E': -0.42, 'D': -0.17, 'G': -0.77, 'F': -0.52, 'I': -0.32, 'H': -0.66, 'K': -0.31, 'M': -0.41, 'L': -0.69, 'N': -0.56, 'Q': -1.0, 'P': -0.89, 'S': -0.71, 'R': -0.64, 'T': -0.62, 'W': -0.49, 'V': -0.56, 'Y': -0.95}, 5: {'A': -0.3, 'C': -0.4, 'E': -0.3, 'D': -0.18, 'G': -0.35, 'F': -1.0, 'I': -0.53, 'H': -0.28, 'K': -0.28, 'M': -0.44, 'L': -0.12, 'N': -0.26, 'Q': -0.21, 'P': -0.11, 'S': -0.35, 'R': -0.13, 'T': -0.55, 'W': -0.36, 'V': -0.37, 'Y': -0.2}, 6: {'A': -0.69, 'C': -0.78, 'E': -0.49, 'D': -0.1, 'G': -0.52, 'F': -0.34, 'I': -0.4, 'H': -0.33, 'K': -0.15, 'M': -0.54, 'L': -1.0, 'N': -0.66, 'Q': -0.51, 'P': -0.62, 'S': -0.65, 'R': -0.22, 'T': -0.49, 'W': -0.31, 'V': -0.56, 'Y': -0.28}, 7: {'A': -0.36, 'C': -0.18, 'E': -0.23, 'D': -0.049, 'G': -0.15, 'F': -0.32, 'I': -0.18, 'H': -1.0, 'K': -0.24, 'M': -0.061, 'L': -0.15, 'N': -0.12, 'Q': -0.17, 'P': -0.35, 'S': -0.34, 'R': -0.15, 'T': -0.4, 'W': -0.035, 'V': -0.38, 'Y': -0.2}, 8: {'A': -0.054, 'C': -0.023, 'E': -0.006, 'D': -0.0025, 'G': -0.075, 'F': -1.0, 'I': -0.042, 'H': -0.018, 'K': -0.001, 'M': -0.11, 'L': -0.049, 'N': -0.0092, 'Q': -0.004, 'P': -0.0045, 'S': -0.0039, 'R': -0.0018, 'T': -0.0039, 'W': -0.0069, 'V': -0.016, 'Y': -0.44}}
| 2,237 | 2,237 | 0.338847 |
63c52b8a164c321004f8a54c113eb69d1180ced8
| 1,973 |
py
|
Python
|
face_detect/sample/learning_opencv3_with_python_sample/Chapter 7_Code/car_sliding_windows.py
|
minatuyang/RASP-ATTA
|
e182248da2f9f131e4e1aca5a2198b6ae910424e
|
[
"MIT"
] | 1 |
2018-11-14T02:54:24.000Z
|
2018-11-14T02:54:24.000Z
|
face_detect/sample/learning_opencv3_with_python_sample/Chapter 7_Code/car_sliding_windows.py
|
minatuyang/RASP-ATTA
|
e182248da2f9f131e4e1aca5a2198b6ae910424e
|
[
"MIT"
] | null | null | null |
face_detect/sample/learning_opencv3_with_python_sample/Chapter 7_Code/car_sliding_windows.py
|
minatuyang/RASP-ATTA
|
e182248da2f9f131e4e1aca5a2198b6ae910424e
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from car_detector.detector import car_detector, bow_features
from car_detector.pyramid import pyramid
from car_detector.non_maximum import non_max_suppression_fast as nms
from car_detector.sliding_window import sliding_window
import urllib
def in_range(number, test, thresh=0.2):
return abs(number - test) < thresh
test_image = "/home/d3athmast3r/dev/python/study/images/cars.jpg"
img_path = "/home/d3athmast3r/dev/python/study/images/test.jpg"
remote = "http://previews.123rf.com/images/aremac/aremac0903/aremac090300044/4545419-Lonely-car-on-an-empty-parking-lot-Stock-Photo.jpg"
urllib.urlretrieve(test_image, img_path)
svm, extractor = car_detector()
detect = cv2.xfeatures2d.SIFT_create()
w, h = 100, 40
img = cv2.imread(img_path)
#img = cv2.imread(test_image)
rectangles = []
counter = 1
scaleFactor = 1.25
scale = 1
font = cv2.FONT_HERSHEY_PLAIN
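# Detection loop (descriptive sketch of what follows): build an image pyramid,
# slide a 100x40 window over each level, score each ROI with the BoW + SVM
# classifier, keep windows the SVM labels as class 1 with a raw score below
# -1.0, rescale them back to the original image, then prune overlapping boxes
# with non-maximum suppression (nms).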
for resized in pyramid(img, scaleFactor):
scale = float(img.shape[1]) / float(resized.shape[1])
for (x, y, roi) in sliding_window(resized, 20, (100, 40)):
if roi.shape[1] != w or roi.shape[0] != h:
continue
try:
bf = bow_features(roi, extractor, detect)
_, result = svm.predict(bf)
a, res = svm.predict(bf, flags=cv2.ml.STAT_MODEL_RAW_OUTPUT | cv2.ml.STAT_MODEL_UPDATE_MODEL)
print "Class: %d, Score: %f, a: %s" % (result[0][0], res[0][0], res)
score = res[0][0]
if result[0][0] == 1:
if score < -1.0:
rx, ry, rx2, ry2 = int(x * scale), int(y * scale), int((x+w) * scale), int((y+h) * scale)
rectangles.append([rx, ry, rx2, ry2, abs(score)])
except:
pass
counter += 1
windows = np.array(rectangles)
boxes = nms(windows, 0.25)
for (x, y, x2, y2, score) in boxes:
print x, y, x2, y2, score
cv2.rectangle(img, (int(x),int(y)),(int(x2), int(y2)),(0, 255, 0), 1)
cv2.putText(img, "%f" % score, (int(x),int(y)), font, 1, (0, 255, 0))
cv2.imshow("img", img)
cv2.waitKey(0)
| 31.31746 | 136 | 0.670046 |
6d16fc43f9a93d490e299253f3061c3c50459b24
| 2,751 |
py
|
Python
|
hpa_src/models/inception.py
|
s6juncheng/HumanProteinAtlas
|
00b8c194133790c9e092339dfb9908d682ea9c87
|
[
"MIT"
] | null | null | null |
hpa_src/models/inception.py
|
s6juncheng/HumanProteinAtlas
|
00b8c194133790c9e092339dfb9908d682ea9c87
|
[
"MIT"
] | null | null | null |
hpa_src/models/inception.py
|
s6juncheng/HumanProteinAtlas
|
00b8c194133790c9e092339dfb9908d682ea9c87
|
[
"MIT"
] | null | null | null |
## My version of InceptionResNetV2 with larger input image
import torch.utils.model_zoo as model_zoo
from pretrainedmodels.models.inceptionresnetv2 import InceptionResNetV2, pretrained_settings
import torch.nn as nn
class MyInceptionResNetV2(InceptionResNetV2):
def __init__(self, *args, **kwargs):
super(MyInceptionResNetV2, self).__init__(*args, **kwargs)
self.conv2d_last = nn.Conv2d(1536, 28, 1)
self.avgpool_last = nn.AvgPool2d(8, count_include_pad=False)
def logits(self, features):
x = self.avgpool_1a(features)
#x = x.view(x.size(0), -1)
#x = self.last_linear(x)
x = self.conv2d_last(x)
x = x.view(x.size(0), -1)
return x
def inceptionresnetv2(num_classes=1000, pretrained='imagenet'):
r"""InceptionResNetV2 model architecture from the
`"InceptionV4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper.
"""
if pretrained:
settings = pretrained_settings['inceptionresnetv2'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = MyInceptionResNetV2(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']), strict=False)
# if pretrained == 'imagenet':
# new_last_linear = nn.Linear(1536, 1000)
# new_last_linear.weight.data = model.last_linear.weight.data[1:]
# new_last_linear.bias.data = model.last_linear.bias.data[1:]
# model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = MyInceptionResNetV2(num_classes=num_classes)
return model
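# Usage sketch (commented out; assumes `import torch`, an NCHW float input, and
# that the pretrained weights can be downloaded from the model zoo URL in settings):
# model = inceptionresnetv2(num_classes=1001, pretrained='imagenet')
# logits = model(torch.randn(1, 3, 299, 299)) # -> shape (1, 28) via conv2d_last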
class TransferedModel(nn.Module):
def __init__(self,
pretrained,
num_classes):
super(TransferedModel, self).__init__()
self.pretrained = pretrained
n_feature = pretrained.last_linear.in_features
self.classifier = nn.Sequential(
#nn.Linear(n_feature, n_feature),
#nn.BatchNorm1d(n_feature),
#nn.ReLU(inplace=True),
nn.Dropout(p=0.2),
nn.Conv1d(16, num_classes, kernel_size=1))
self.pretrained.last_linear = self.classifier
def forward(self, x):
x = self.pretrained(x)
return x
# nn.Conv2d(n_feature, n_feature, 1),
# nn.ReLU(inplace=True),
| 37.684932 | 94 | 0.640494 |
54666d3e365b502e0fe693f6efa6697e2863a925
| 14,866 |
py
|
Python
|
KernTool3.roboFontExt/lib/tdExceptionView.py
|
typedev/KernTool3
|
f95e2107b56d65429365f38dd43b222940d5025d
|
[
"MIT"
] | 3 |
2021-02-04T23:28:59.000Z
|
2021-02-26T11:26:45.000Z
|
KernTool3.roboFontExt/lib/tdExceptionView.py
|
typedev/KernTool3
|
f95e2107b56d65429365f38dd43b222940d5025d
|
[
"MIT"
] | null | null | null |
KernTool3.roboFontExt/lib/tdExceptionView.py
|
typedev/KernTool3
|
f95e2107b56d65429365f38dd43b222940d5025d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
# from math import *
from vanilla import *
from mojo.UI import *
from fontParts.world import CurrentFont, RGlyph
from defconAppKit.windows.baseWindow import BaseWindowController
from mojo.canvas import Canvas
from mojo.drawingTools import *
from mojo.glyphPreview import GlyphPreview
from defconAppKit.controls.glyphCollectionView import GlyphCollectionView
from AppKit import *
# from mojo.drawingTools import drawingTools
from fontTools.pens.cocoaPen import CocoaPen
import importlib
import tdCanvasKeysDecoder
importlib.reload(tdCanvasKeysDecoder)
from tdCanvasKeysDecoder import decodeCanvasKeys
from mojo.canvas import Canvas
from mojo.drawingTools import *
# from robofab.world import CurrentFont
# from vanilla.nsSubclasses import getNSSubclass
from defconAppKit.windows import *
import tdKernToolEssentials
importlib.reload(tdKernToolEssentials)
from tdKernToolEssentials import *
import tdGroupViews
importlib.reload(tdGroupViews)
from tdGroupViews import TDGroupLine
class TDExceptionLine(VanillaBaseObject):
nsViewClass = NSView
def __init__ (self, posSize, selectionCallback=None, sizeStyle='big'):
xw, yw, tx, ty = posSize
self.glyphsToDisplay = []
self.keyGlyph = None
self.font = None
self.hashKernDic = None
self.direction = 'L'
self.showSelected = False
self.groupIsEmpty = False
self.diffMarginsInGroup = False
self.groupname = None
self.keyGlyph = None
self.keyGlyphMargin = None
self.showInfo = True
self.excGlyphL = None
self.excGlyphR = None
self.kernValue = 0
self.darkmode = KERNTOOL_UI_DARKMODE
self.darkmodeWarm = KERNTOOL_UI_DARKMODE_WARMBACKGROUND
self._alpha = .1
self._scalefactorUI = .045 # sizeStyle = 'regular'
ty = 85
if sizeStyle == 'big':
self._scalefactorUI = .065
ty = 100
elif sizeStyle == 'small':
self._scalefactorUI = .035
ty = 65
elif sizeStyle == 'mini':
self._scalefactorUI = .025
ty = 45
elif sizeStyle == 'micro':
self._scalefactorUI = .015
ty = 30
self.heightOfControl = ty
self._selectionCallback = selectionCallback
self._selfHeight = ty
self._setupView(self.nsViewClass, (xw, yw, tx, ty)) # (0, 0, -0, 106)
self.infoLine = Canvas((0, 0, -0, -0),
delegate = self, # canvasSize = (ty, ty),
hasHorizontalScroller = False,
hasVerticalScroller = False,
autohidesScrollers = True,
backgroundColor = NSColor.whiteColor(),
# acceptMouseMoved = True
)
self.infoLine.scrollView.getNSScrollView().setBorderType_(NSNoBorder)
# self.infoLine.update()
def setFont (self, font, hashKernDic):
self.font = font
self.hashKernDic = hashKernDic
def selected (self, selected=False):
self.showSelected = selected
self.infoLine.update()
# if self._selectionCallback and selected:
# self._selectionCallback(self)
def setKeyGlyph (self, groupname):
if len(self.font.groups[groupname]) > 0:
gname = self.font.groups[groupname][0]
self.keyGlyph = gname
else:
self.keyGlyph = None
self.keyGlyphMargin = 0
self.groupIsEmpty = True
def setupGroupView (self, groupname):
self.glyphsToDisplay = []
self.diffMarginsInGroup = False
self.groupname = groupname
self.setKeyGlyph(groupname = groupname)
if not self.keyGlyph: return
totalglyphs = len(self.font.groups[groupname])
if totalglyphs in range(0, 6):
self._alpha = .3
else:
self._alpha = .1
for idx, glyphname in enumerate(self.font.groups[groupname]):
if glyphname in self.font:
self.glyphsToDisplay.append(self.font[glyphname])
self.infoLine.update()
def setPair (self, pair, direction):
self.direction = direction
l, r = pair
pair = researchPair(self.font, self.hashKernDic, (l, r))
self.excPairName = (l, r)
gL = pair['L_nameForKern']
gR = pair['R_nameForKern']
if not pair['kernValue']:
self.kernValue = 0
else:
self.kernValue = pair['kernValue']
if self.direction == 'L':
self.resultPair = (gL, r)
self.setupGroupView(gL)
elif self.direction == 'R':
self.resultPair = (l, gR)
self.setupGroupView(gR)
elif self.direction == 'B':
self.resultPair = (l, r)
self.infoLine.update()
def mouseDown (self, event):
if self._selectionCallback:
self._selectionCallback(self)
def resetView (self):
self.infoLine.update()
def draw (self):
def drawLeftCursor (txt, Xcenter, Ycontrol, color):
m = 17
y = Ycontrol
fsize = 10
step = 0 # 6.55
font('Menlo', fsize * m)
fill(0, 0, 0, 1)
txlen, _y = textSize(txt)
if color:
fillRGB(COLOR_EXCEPTION_GROUP_ICON)
# w = step * len(txt) * m + step * m
w = txlen + 140
Xpos = Xcenter - w + 20
newPath()
moveTo((Xpos, y))
curveTo((Xpos - 7 * m, y), (Xpos - 7 * m, y + 2 * m), (Xpos - 7 * m, y + 7 * m))
curveTo((Xpos - 7 * m, y + 12 * m), (Xpos - 7 * m, y + 14 * m), (Xpos, y + 14 * m))
# lineTo((Xpos + w, y + 14 * m))
lineTo((Xpos + w, y + 14 * m))
lineTo((Xpos + w - 3 * m, y + 7 * m))
lineTo((Xpos + w, y + 7 * m))
lineTo((Xpos + w - 3 * m, y))
lineTo((Xpos + w, y))
closePath()
drawPath()
fill(1, 1, 1, 1)
text(txt, (Xpos, y + 1.5 * m))
def drawRightCursor (txt, Xcenter, Ycontrol, color):
m = 17
y = Ycontrol
fsize = 10
step = 0 #6.55
font('Menlo', fsize * m)
fill(0, 0, 0, 1)
txlen, _y = textSize(txt)
if color:
fillRGB(COLOR_EXCEPTION_GROUP_ICON)
# w2 = step * len(txt) * m + (step / 2) * m + step * m
w2 = txlen + 140
Xpos = Xcenter - 20
w = 0
newPath()
moveTo((Xpos + w, y))
# lineTo((Xpos + w, y + 14 * m))
lineTo((Xpos + 3 * m, y + 7 * m))
lineTo((Xpos, y + 7 * m))
lineTo((Xpos + 3 * m, y + 14 * m))
lineTo((Xpos + w + w2, y + 14 * m))
curveTo((Xpos + w + w2 + 7 * m, y + 14 * m), (Xpos + w + w2 + 7 * m, y + 12 * m),
(Xpos + w + w2 + 7 * m, y + 7 * m))
curveTo((Xpos + w + w2 + 7 * m, y + 2 * m), (Xpos + w + w2 + 7 * m, y), (Xpos + w + w2, y))
closePath()
drawPath()
fill(1, 1, 1, 1)
text(txt, (Xpos + w + step * m + 140, y + 1.5 * m))
visibleHeight = self.infoLine.scrollView.getNSScrollView().documentVisibleRect().size.height
visibleWidth = self.infoLine.scrollView.getNSScrollView().documentVisibleRect().size.width
scalefactor = self._scalefactorUI
Xcenter = visibleWidth / 2
# if not self.glyphsToDisplay: return
if self.keyGlyph:
keyGlyph = self.font[self.keyGlyph] # self.glyphsToDisplay[-1]
keyWidth = keyGlyph.width
else:
keyWidth = 0
Xright = 0
Xleft = Xcenter # - keyWidth/2
Xright = Xleft # + keyWidth
if self.darkmodeWarm:
fillRGB((.75, .73, .7, .8))
# _rw = self.maxX+100
# # print('maxY', maxY)
# if _rw < visibleWidth:
# _rw = visibleWidth #+ 500
rect(0, 0, visibleWidth, visibleHeight)
translate(visibleWidth / 2 - visibleWidth / 30, (visibleHeight / 3)) # -4
stroke(0, 0, 0, 0)
strokeWidth(0)
scale(scalefactor)
if self.direction != 'B':
for idx, glyph in enumerate(self.glyphsToDisplay):
save()
pen = CocoaPen(self.font)
if self.direction == 'L':
translate(Xcenter - glyph.width, 0)
elif self.direction == 'R':
translate(self.kernValue + Xcenter, 0)
fill(0, 0, 0, self._alpha)
if glyph.name == self.keyGlyph:
fill(0, 0, 0, 1)
glyph.draw(pen)
drawPath(pen.path)
restore()
translate(0, 0)
save()
(l, r) = self.resultPair
pen = CocoaPen(self.font)
if self.direction == 'L':
glyph = self.font[r]
translate(self.kernValue + Xcenter, 0)
elif self.direction == 'R':
glyph = self.font[l]
translate(Xcenter - glyph.width, 0)
fillRGB(COLOR_EXCEPTION_GROUP_ICON)
glyph.draw(pen)
drawPath(pen.path)
restore()
translate(0, 0)
elif self.direction == 'B':
save()
(l, r) = self.resultPair
pen = CocoaPen(self.font)
glyph = self.font[l]
translate(Xcenter - glyph.width, 0)
fillRGB(COLOR_EXCEPTION_GROUP_ICON)
glyph.draw(pen)
drawPath(pen.path)
restore()
translate(0, 0)
save()
pen = CocoaPen(self.font)
glyph = self.font[r]
translate(self.kernValue + Xcenter, 0)
fillRGB(COLOR_EXCEPTION_GROUP_ICON)
glyph.draw(pen)
drawPath(pen.path)
restore()
translate(0, 0)
if self.showSelected:
Ycontrol = -450
(l, r) = self.resultPair
if self.direction == 'L':
drawLeftCursor(getDisplayNameGroup(l),Xcenter-20, Ycontrol,False)
drawRightCursor(r, Xcenter+20,Ycontrol,True)
elif self.direction == 'R':
drawLeftCursor(l, Xcenter-20, Ycontrol, True)
drawRightCursor(getDisplayNameGroup(r), Xcenter+20, Ycontrol, False)
elif self.direction == 'B':
drawLeftCursor(l, Xcenter-20, Ycontrol, True)
drawRightCursor(r, Xcenter+20, Ycontrol, True)
self.infoLine._view.setFrame_(NSMakeRect(0, 0, visibleWidth, visibleHeight))
class TDExceptionView(object):
def __init__ (self, parentWindow, font=None,
hashKernDic=None, pair=None, callback=None, autokern = True):
wW = 400
hW = 500
self.w = Sheet((wW, hW), parentWindow)
self.callback = callback
self.font = font
self.hashKernDic = hashKernDic
self.pair = pair
self.selectedPair = None
self.deltaKern = None
self.useAutokern = autokern
self.direction = 'L'
hGRPcontrols = 85
yInfoControl = 30
sizeStyle = 'big'
hasHorizontalScroller = False
separatePairs = False
# pair = ['T', 'icircumflex']
self.w.lblMessage = TextBox((10, 10, -10, 17), text = 'Choose exception:', sizeStyle = 'small')
self.w.gC = TDExceptionLine(posSize = (5, yInfoControl, -5, hGRPcontrols),
selectionCallback = self._viewSelected,
sizeStyle = sizeStyle)
nextG = self.w.gC.heightOfControl
self.w.gC2 = TDExceptionLine(posSize = (5, yInfoControl + nextG + 2, -5, hGRPcontrols),
selectionCallback = self._viewSelected,
sizeStyle = sizeStyle)
nextG += self.w.gC2.heightOfControl
self.w.gC3 = TDExceptionLine(posSize = (5, yInfoControl + nextG + 4, -5, hGRPcontrols),
selectionCallback = self._viewSelected,
sizeStyle = sizeStyle)
nextG += self.w.gC3.heightOfControl + 8
self.w.lblMessage2 = TextBox((10, yInfoControl + nextG, 200, 17), text = 'Preview:', sizeStyle = 'small')
self.w.checkAutokern = CheckBox((-130, yInfoControl + nextG - 2, 140, 17),
title = 'fix touches', value = self.useAutokern,
sizeStyle = 'small', callback = self.checkUseAutokernCallback)
self.w.checkAutokern.set(self.useAutokern)
# self.deltaKern = self.getDeltaKern(self.pair)
nextG = nextG + 16
self.w.excPreview = TDGroupLine(posSize = (5, yInfoControl + nextG, -5, hGRPcontrols),
# selectionCallback = self._viewSelected,
separatePairs = True,
sizeStyle = sizeStyle,
hasHorizontalScroller = True,
showValues = True)
nextG += self.w.excPreview.heightOfControl
self.w.gC.setFont(font, self.hashKernDic)
self.w.gC.setPair(pair, direction = 'L')
self.w.gC2.setFont(font, self.hashKernDic)
self.w.gC2.setPair(pair, direction = 'R')
self.w.gC3.setFont(font, self.hashKernDic)
self.w.gC3.setPair(pair, direction = 'B')
self.w.excPreview.setFont(font, self.hashKernDic)
self.w.btnApply = Button(((wW / 2) + 2, yInfoControl + nextG + 8, -10, 17), "Apply",
callback = self.btnCloseCallback,
sizeStyle = 'small')
self.w.btnCancel = Button((10, yInfoControl + nextG + 8, (wW / 2) - 12, 17), "Cancel",
callback = self.btnCloseCallback,
sizeStyle = 'small')
self.w.open()
self.w.gC.selected(True)
if self.useAutokern:
self.deltaKern = self.getDeltaKern(self.pair)
self.w.excPreview.setPair(self.pair, direction = self.direction, deltaKern = self.deltaKern)
self.selectedPair = self.w.gC.resultPair
def _viewSelected (self, sender):
# print 'info:', info
# print sender#self.w.gC.get()
# print sender.resultPair
self.selectedPair = sender.resultPair
if sender == self.w.gC:
self.w.gC.selected(True)
self.w.gC2.selected(False)
self.w.gC3.selected(False)
self.direction = 'L'
self.w.excPreview.setPair(self.pair, direction = self.direction, deltaKern = self.deltaKern)
elif sender == self.w.gC2:
self.w.gC.selected(False)
self.w.gC2.selected(True)
self.w.gC3.selected(False)
self.direction = 'R'
self.w.excPreview.setPair(self.pair, direction = self.direction, deltaKern = self.deltaKern)
elif sender == self.w.gC3:
self.w.gC.selected(False)
self.w.gC2.selected(False)
self.w.gC3.selected(True)
self.direction = 'B'
self.w.excPreview.setPair(self.pair, direction = self.direction, deltaKern = self.deltaKern)
def btnCloseCallback (self, sender):
if self.callback and sender == self.w.btnApply:
kern = self.deltaKern
if not kern:
kern = 0
self.callback((self.selectedPair, kern, self.useAutokern))
self.w.close()
def checkUseAutokernCallback (self, sender):
self.useAutokern = sender.get()
if sender.get():
self.deltaKern = self.getDeltaKern(self.pair)
self.w.excPreview.setPair(self.pair, direction = self.direction, deltaKern = self.deltaKern)
else:
self.deltaKern = None
self.w.excPreview.setPair(self.pair, direction = self.direction)
def getDeltaKern (self, pair):
return autoCalcPairValue(self.font, self.hashKernDic, self.pair, simplePair = True, mode = 'fixtouches')
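# Note on the callback payload: btnCloseCallback above reports the selection as
# ((left, right), kernValue, useAutokern), which is what getResultFromexceptionView
# in the test section below unpacks.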
# TEST Section
if __name__ == "__main__":
class MyW(object):
def __init__ (self):
self.w = Window((300, 400), "Choose exception", minSize = (100, 100))
hGRPcontrols = 85
yInfoControl = 5
wW = 500
hW = 350
self.font = CurrentFont()
self.hashKernDic = TDHashKernDic(self.font)
self.flagAutoKern = True
self.pair = ['A', 'H']
# self.pair = ['A', 'afii10071']
# self.pair = ['Lslash', 'Tbar']
self.w.btnOpen = Button(((wW / 2) + 2, hW - 22, -10, 17), "Apply", callback = self.btnOpenCallback,
sizeStyle = 'small')
self.w.open()
def btnOpenCallback (self, sender):
TDExceptionView(self.w, font = self.font,
hashKernDic = self.hashKernDic,
pair = self.pair,
callback = self.getResultFromexceptionView,
autokern = self.flagAutoKern)
def getResultFromexceptionView (self, result):
(l,r), v, a = result
print ('RESULT')
print ((l,r),v,a)
self.flagAutoKern = a
MyW()
| 30.154158 | 107 | 0.638571 |
28d5ac18dcfe2639b15e819f969cdb50f586e622
| 125 |
py
|
Python
|
nimbro_service_transport/scripts/get_md5.py
|
lsolanka-flya/nimbro_network
|
13768a9971ea91ce081c4774fb7fecfc4c323912
|
[
"BSD-3-Clause"
] | 105 |
2015-09-22T14:36:32.000Z
|
2022-03-09T08:51:43.000Z
|
nimbro_service_transport/scripts/get_md5.py
|
tradr-project/nimbro_network
|
97e15d7d4c0d1a56fa85cdc62da13b00ef5275a9
|
[
"BSD-3-Clause"
] | 19 |
2015-09-25T23:23:39.000Z
|
2021-11-04T16:59:49.000Z
|
nimbro_service_transport/scripts/get_md5.py
|
tradr-project/nimbro_network
|
97e15d7d4c0d1a56fa85cdc62da13b00ef5275a9
|
[
"BSD-3-Clause"
] | 33 |
2015-09-25T18:11:31.000Z
|
2022-03-23T06:55:28.000Z
|
#!/usr/bin/python
import roslib.message
import sys
sys.stdout.write(roslib.message.get_service_class(sys.argv[1])._md5sum)
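# Example invocation (the service type below is only illustrative):
# ./get_md5.py std_srvs/Empty   -> writes the md5 sum of that service definition to stdout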
| 17.857143 | 71 | 0.792 |
599d9122ced23f1433235fd8ef8056a1d9e52de0
| 5,190 |
py
|
Python
|
simple_site_checker.py
|
terryBaz/Simple-Site-Checker
|
0343c3b13cd46c2fe61a91d9378a41d2c46c4760
|
[
"BSD-3-Clause"
] | null | null | null |
simple_site_checker.py
|
terryBaz/Simple-Site-Checker
|
0343c3b13cd46c2fe61a91d9378a41d2c46c4760
|
[
"BSD-3-Clause"
] | null | null | null |
simple_site_checker.py
|
terryBaz/Simple-Site-Checker
|
0343c3b13cd46c2fe61a91d9378a41d2c46c4760
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import argparse
from datetime import datetime
import logging
import os
import sys
import urllib.request, urllib.error, urllib.parse
from lxml import etree
USER_AGENT = ''
SITEMAP_NAMESPACE = 'http://www.sitemaps.org/schemas/sitemap/0.9'
XMLNS = {'sitemap': SITEMAP_NAMESPACE}
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
VERBOSE_HELP = (
"""Verbose mode. Controls the script output
0 - print output only in case of errors
1 - print the result count plus the list of failed URLs (if any)
2 - print all checked URLs \n""")
LOGGING_LEVELS = {
0: logging.ERROR,
1: logging.INFO,
2: logging.DEBUG,
}
logger = logging.getLogger(__name__)
class HeadRequest(urllib.request.Request):
def get_method(self):
return "HEAD"
class XMLSitemapParser(object):
total = 0
succeeded = 0
failed = []
sitemaps = {}
def load_sitemap(self, url):
logger.debug('Loading sitemap %s' % url)
if '://' in url:
try:
sitemap = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': USER_AGENT}))
except urllib.error.HTTPError as e:
if e.code == 404:
logger.error('Sitemap not found as %s' % url)
elif e.code == 500:
logger.error('Server error when accessing sitemap as %s' % url)
else:
logger.error('Server error \'%s\' when accessing sitemap as %s' % (e, url))
sys.exit(1)
except Exception as e:
logger.debug('Unexpected error: %s', e)
logger.error('Unexpected error while loading sitemap.')
sys.exit(1)
else:
try:
path = os.path.abspath(url)
sitemap = open(url, encoding="utf8")
except Exception as e:
logger.error('Unable to load sitemap file from %s' % path)
logger.debug(e)
sys.exit(1)
try:
tree = etree.parse(sitemap)
except Exception as e:
logger.debug('Unexpected error: %s', e)
logger.error('Unexpected error while parsing sitemap XML from %s' % url)
else:
root = tree.getroot()
if root.tag == '{%s}sitemapindex' % SITEMAP_NAMESPACE:
self.process_sitemapindex(tree)
else:
self.sitemaps[url] = tree
def process_sitemapindex(self, tree):
logger.debug('Processing sitemapindex')
for tag in tree.xpath('//sitemap:sitemap/sitemap:loc', namespaces=XMLNS):
sitemap_loc = tag.text
self.load_sitemap(sitemap_loc)
def process_sitemap(self, sitemap):
tree = self.sitemaps[sitemap]
logger.debug('Processing sitemap %s' % sitemap)
loc_tags = tree.xpath('//sitemap:loc', namespaces=XMLNS)
urls_found = len(loc_tags)
self.total += urls_found
logger.info('%i URLs found' % urls_found)
for tag in loc_tags:
loc_url = tag.text
logger.debug('Checking %s' % loc_url)
try:
loc_url = urllib.parse.urlsplit(loc_url)
loc_url = list(loc_url)
loc_url[2] = urllib.parse.quote(loc_url[2])
loc_url = urllib.parse.urlunsplit(loc_url)
response = urllib.request.urlopen(HeadRequest(loc_url.encode('ascii', 'ignore').decode('ascii'), headers={'User-Agent': USER_AGENT}))
self.succeeded += 1
logger.info('%s - OK' % loc_url)
except Exception as e:
self.failed.append((loc_url, e))
logger.error('%s -> %s' % (loc_url, e))
def process_sitemaps(self):
for sitemap in self.sitemaps:
self.process_sitemap(sitemap)
def time_info(start, end):
hours, remainder = divmod((end-start).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
logger.info('Start - %s' % start.strftime(DATETIME_FORMAT))
logger.info('End - %s' % end.strftime(DATETIME_FORMAT))
logger.info('Time elapsed %s:%s:%s' % (hours, minutes, seconds))
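# Example invocation (the URL is illustrative only):
# python simple_site_checker.py https://example.com/sitemap.xml -v 1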
def main():
arg_parser = argparse.ArgumentParser(description='Simple Site Checker',
formatter_class=argparse.RawTextHelpFormatter)
arg_parser.add_argument('sitemap', metavar='s', type=str,
help='XML sitemap URL/path')
arg_parser.add_argument('-v', '--verbose', type=int, required=False,
help=VERBOSE_HELP, default = 0, choices=LOGGING_LEVELS)
args = arg_parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s',
level = LOGGING_LEVELS[args.verbose])
start = datetime.now()
url = args.sitemap
parser = XMLSitemapParser()
parser.load_sitemap(url)
parser.process_sitemaps()
end = datetime.now()
failed_number = len(parser.failed)
logger.info('Result - Checked %i, succeeded %i, failed %i' %
(parser.total, parser.succeeded, failed_number))
time_info(start, end)
if __name__ == '__main__':
main()
| 33.057325 | 149 | 0.587476 |
320f0c665346cd5d19578bebbc23369ee768006b
| 3,098 |
py
|
Python
|
env_1.py
|
jcostacurta11/panda-env
|
519733eb329ced3fffd2a957e0c526ded66f0ae0
|
[
"MIT"
] | 5 |
2020-09-10T08:40:37.000Z
|
2021-08-10T09:47:41.000Z
|
env_1.py
|
jcostacurta11/panda-env
|
519733eb329ced3fffd2a957e0c526ded66f0ae0
|
[
"MIT"
] | null | null | null |
env_1.py
|
jcostacurta11/panda-env
|
519733eb329ced3fffd2a957e0c526ded66f0ae0
|
[
"MIT"
] | 2 |
2020-10-05T19:06:34.000Z
|
2021-08-07T11:08:13.000Z
|
import os
import numpy as np
import pybullet as p
import pybullet_data
from panda import Panda
from objects import YCBObject, InteractiveObj, RBOObject
class SimpleEnv():
def __init__(self):
# create simulation (GUI)
self.urdfRootPath = pybullet_data.getDataPath()
p.connect(p.GUI)
p.setGravity(0, 0, -9.81)
# set up camera
self._set_camera()
# load some scene objects
p.loadURDF(os.path.join(self.urdfRootPath, "plane.urdf"), basePosition=[0, 0, -0.65])
p.loadURDF(os.path.join(self.urdfRootPath, "table/table.urdf"), basePosition=[0.5, 0, -0.65])
# example YCB object
obj1 = YCBObject('003_cracker_box')
obj1.load()
p.resetBasePositionAndOrientation(obj1.body_id, [0.7, -0.2, 0.1], [0, 0, 0, 1])
# load a panda robot
self.panda = Panda()
def reset(self):
self.panda.reset()
return self.panda.state
def close(self):
p.disconnect()
def step(self, action):
# get current state
state = self.panda.state
# action in this example is the end-effector velocity
self.panda.step(dposition=action)
# take simulation step
p.stepSimulation()
# return next_state, reward, done, info
next_state = self.panda.state
reward = 0.0
done = False
info = {}
return next_state, reward, done, info
def render(self):
(width, height, pxl, depth, segmentation) = p.getCameraImage(width=self.camera_width,
height=self.camera_height,
viewMatrix=self.view_matrix,
projectionMatrix=self.proj_matrix)
rgb_array = np.array(pxl, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (self.camera_height, self.camera_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _set_camera(self):
self.camera_width = 256
self.camera_height = 256
p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=30, cameraPitch=-60,
cameraTargetPosition=[0.5, -0.2, 0.0])
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5, 0, 0],
distance=1.0,
yaw=90,
pitch=-50,
roll=0,
upAxisIndex=2)
self.proj_matrix = p.computeProjectionMatrixFOV(fov=60,
aspect=float(self.camera_width) / self.camera_height,
nearVal=0.1,
farVal=100.0)
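# Minimal rollout sketch (commented out; assumes the action is a 3-vector of
# end-effector velocities, as suggested by step() above, and that a display is
# available for the PyBullet GUI):
# env = SimpleEnv()
# state = env.reset()
# for _ in range(240):
#     state, reward, done, info = env.step(np.array([0.0, 0.0, 0.01]))
# env.close()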
| 38.246914 | 109 | 0.496449 |
4a032be5d1b43e725143b0c597cc83f0af62e150
| 10,964 |
py
|
Python
|
citation_graph.py
|
Azzaare/multiplex-carbonara
|
c911bd08aa4120a3ebf099b03eb1fa1670a3a255
|
[
"MIT"
] | null | null | null |
citation_graph.py
|
Azzaare/multiplex-carbonara
|
c911bd08aa4120a3ebf099b03eb1fa1670a3a255
|
[
"MIT"
] | null | null | null |
citation_graph.py
|
Azzaare/multiplex-carbonara
|
c911bd08aa4120a3ebf099b03eb1fa1670a3a255
|
[
"MIT"
] | null | null | null |
import os
from py2neo import Graph, Node, Relationship, authenticate
#lists and tables required to parse the date
months = {
"jan": 1,
"january": 1,
"feb": 2,
"february": 2,
"mar": 3,
"march": 3,
"apr": 4,
"april": 4,
"may": 5,
"jun": 6,
"june": 6,
"jul": 7,
"july": 7,
"aug": 8,
"august": 8,
"sep": 9,
"september": 9,
"oct": 10,
"october": 10,
"nov": 11,
"november": 11,
"dec": 12,
"december":12
}
days = ["mon","tue","wed","thu","fri","sat","sun"]
dates = ["1","01","2","02","3","03","4","04","5","05","6","06","7","07","8","08","9","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31"]
years = ["1991","1992","1993","1994","1995","1996","1997","1998","1999","2000","2001","2002","2003"]
years_short = {"91":1991,"92":1992,"93":1993,"94":1994,"95":1995,"96":1996,"97":1997,"98":1998,"99":1999,"00":2000,"01":2001,"02":2002,"03":2003}
#function used in parsing authors list
def remove_text_inside_brackets(text, brackets="()[]"): #taken from http://stackoverflow.com/questions/14596884/remove-text-between-and-in-python
count = [0] * (len(brackets) // 2) # count open/close brackets
saved_chars = []
for character in text:
for i, b in enumerate(brackets):
if character == b: # found bracket
kind, is_close = divmod(i, 2)
count[kind] += (-1)**is_close # `+1`: open, `-1`: close
if count[kind] < 0: # unbalanced bracket
count[kind] = 0
break
else: # character is not a bracket
if not any(count): # outside brackets
saved_chars.append(character)
return ''.join(saved_chars)
#function used to determine the publication index at which to start push_neo_graph(), based on the total number of citations already loaded
def citation_no(pub_data,l):
k=0
h=0
for i in pub_data:
for j in pub_data[i][0]:
if h == l:
return k
h=h+1
k=k+1
#Parsing functions for parsing the date and the author list
def parse_date(line):
l =" ".join(line.split()) #remove extra spaces
l = l.lower() #remove capitals
l = l.split(' ') #split the sentence into words
j=0
m=0
a=0
for i in l :
if i in dates:
j = int(i)
if i in months:
m = months[i]
if i in years :
a = int(i)
if i in years_short:
a = years_short[i]
return [j,m,a]
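# e.g. parse_date("Date: Mon, 2 Apr 2001 12:00:00 GMT (10kb)") -> [2, 4, 2001]
# (the header text is illustrative; words not in the lookup tables are ignored)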
def adjust_initials(aut):
l = aut.split(".")
ll = []
lll = []
for i in l:
ll+= [i.lstrip().rstrip()]
ll = ". ".join(ll)
ll = ll.split(" ")
for i in ll:
if len(i)==1: #if it's an initial
lll += [i+"."]
else:
lll += [i]
lll = " ".join(lll)
return lll
def parse_author(line): # Can be better
l = line.strip() #remove special chars
l = remove_text_inside_brackets(l)
#remove all instances of special accents (\\)
l = l.replace("\\'","")
l = l.replace("\\\"","")
#l = l.replace("\\","") there are still special accents to remove
l =" ".join(l.split()) #remove extra spaces
l = l.split(' ',2) #delete the "Authors:"
l = " ".join(l[1:])
l = l.split('and ') #remove the "and"s and commas
lp = []
for i in l:
lp += i.split(',')
lp = [adjust_initials(x.lstrip().rstrip()).lower() for x in lp if x.lstrip().rstrip() != ""] #remove the spaces at the beginning and end of authors name, and add spaces between initials
return lp
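#Illustrative example (hypothetical input): parse_author("Authors: A. Zee (ITP) and Edward Witten")
#strips the "Authors:" prefix and the bracketed affiliation, splits on "and"/commas and
#returns ["a. zee", "edward witten"] (lower-cased, with initials spaced by adjust_initials).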
#Functions for building and querying the data structure which associates, for each publication, the other publications it cites, its publication date and its list of authors
#function to return list of unique authors
def author_list(pub_data):
autlist = []
for i in pub_data:
autlist+=pub_data[i][2]
autlist = list(set(autlist))
return autlist
#function to return count of authors
def count_authors(pub_data):
return len(author_list(pub_data))
#function which adjusts the initials to the correct format
def author_initials(name):
tnames = name.lower().split(" ")
tname = ""
for s in tnames[:len(tnames)-1]:
if s[len(s)-1]!='.':
tname += s[0]+'.'
else:
tname+=s
return tname+tnames[len(tnames)-1]
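#Illustrative example: author_initials("Edward Witten") and author_initials("e. witten")
#both give the key "e.witten", which is what the conflict check and the reduction below rely on.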
#function which checks if there are conflicts between different authors sharing the same initials
def check_author_initials_conflict(pub_data):
autlist = author_list(pub_data)
initial_table = {}
for a in autlist:
initial_table[author_initials(a)] = []
for a in autlist:
#if "".join(a.lower().split()) != author_initials(a):
initial_table[author_initials(a)] += [a]
#corrections
#remove singletons
to_delete = []
for i in initial_table:
if len(initial_table[i]) <= 1:
to_delete+=[i]
for i in to_delete:
del initial_table[i]
k=0
for i in initial_table:
		print(i, initial_table[i])
if len(initial_table[i])>2:
k+=1
	print(k)
#function to reduce the number of authors by merging entries where one author name is just the initials of another author's name
def reduce_authors(pub_data): #PROBLEMATIC if different authors share the same initials, especially if one of them only ever appears as initials while the other appears with both initials and full name
#First get lists of all authors, then classify authors by initials. If two (and only two) authors share the same initials, and if one of them is equal to the initials, then mark the change to use the other author name
	####### BUG: name suffixes such as "jr." are not handled correctly here
autlist = author_list(pub_data)
initial_table = {}
change_table = {}
for a in autlist: #build initials tables
initial_table[author_initials(a)] = []
for a in autlist:
initial_table[author_initials(a)] += [a]
#if one author corresponds to one initial, nothing to do. If two authors correspond to one initial check if we can reduce. If 3 or more authors correspond to the same initial too complicated to do anything
for i in initial_table:
if len(initial_table[i]) == 2:
if "".join(initial_table[i][0].lower().split()) == author_initials(initial_table[i][0]):
change_table[initial_table[i][0]] = initial_table[i][1]
elif "".join(initial_table[i][1].lower().split()) == author_initials(initial_table[i][1]):
change_table[initial_table[i][1]] = initial_table[i][0]
#now we reduce
for id in pub_data:
for i in range(len(pub_data[id][2])):
if pub_data[id][2][i] in change_table:
pub_data[id][2][i] = change_table[pub_data[id][2][i]]
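#Illustrative example: if the metadata contains both "e. witten" and "edward witten",
#they share the initials key "e.witten"; the first entry equals its own initials, so
#change_table maps "e. witten" -> "edward witten" and every author list in pub_data is
#rewritten to use the fuller name.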
#Function which loads the data into the data structure
def load_data():
pub_data = {} #Data structure for our program. Associates to an id (int) a list of 3 lists : the list of citations, the date and the list of authors
	print("Loading data...")
#First we will load the file with the citation data to add the citations to the data structure
f = open('/home/vivek/prog/multiplex-carbonara/Cit-HepTh.txt','r')
for i in range(4): #first four lines are useless
line = f.readline()
for line in f : #read lines
l = line.strip().split('\t')
i1 = int(l[0])
if i1 not in pub_data:
			pub_data[i1] = [[],[],[]] #if the entry for that publication doesn't exist, initialize it
i2 = int(l[1])
if i2 not in pub_data:
			pub_data[i2] = [[],[],[]] #if the entry for that publication doesn't exist, initialize it
pub_data[i1][0].append(i2) #add citation
#Secondly we will load the files with the metadata to add the dates and authors of the publications to the data structure
for root,dirs,fns in os.walk("/home/vivek/prog/multiplex-carbonara/cit-HepTh-abstracts/") :
for fn in fns :
if fn.endswith(".abs") :
f = open(os.path.join(root, fn),'r')
id = int(fn.split('.')[0]) #the ID of the publication is its filename
if id in pub_data: #if the publication is in our citations data
lauthors = [] #list of authors for the publication
ldate = [] #date for the publication, in the format [day,month,year] (int)
line=f.readline()
while line != "" :
if line.split(' ')[0] == "Date:" :
ldate=parse_date(line)
if line.split(' ')[0] == "Authors:" or line.split(' ')[0] == "Author:" : #Authors can be written over several lines...
laut = line
line = f.readline()
while (line.split(' ')[0] != "Comments:" and line.split(' ')[0] != "Report-no:" and
line.split(' ')[0] != "Subj-class:" and line.split(' ')[0] != "Journal-ref:" and
line.split(' ')[0].strip() != "\\\\") : #we read until we reach another section
laut+=line
line = f.readline()
lauthors = parse_author(laut)
line = f.readline()
pub_data[id][1] = ldate #add the metadata to the data structure
pub_data[id][2] = lauthors
	reduce_authors(pub_data) #reduce the number of authors (check if some different author entries are really the same author with the name written differently)
	print("Data loaded")
return pub_data
| 40.758364 | 225 | 0.513134 |
9d2e66567145a7a89fe9bfc9f74ef5a90ea1db40
| 367 |
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/thousand-separator.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269 |
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/thousand-separator.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53 |
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/thousand-separator.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236 |
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def thousandSeparator(self, n):
"""
:type n: int
:rtype: str
"""
result = []
s = str(n)
for i, c in enumerate(str(n)):
if i and (len(s)-i)%3 == 0:
result.append(".")
result.append(c)
return "".join(result)
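# A minimal usage sketch (illustrative, self-contained):
if __name__ == "__main__":
    sol = Solution()
    assert sol.thousandSeparator(987) == "987"
    assert sol.thousandSeparator(1234567) == "1.234.567"
    print(sol.thousandSeparator(51040))  # prints "51.040"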
| 21.588235 | 39 | 0.430518 |
6a578ad3665703fc3b6411a96fc3eedd49fe790f
| 3,201 |
py
|
Python
|
graalpython/com.oracle.graal.python.test/src/tests/test_mro.py
|
qunaibit/graalpython
|
7676d10af92f5a7ddb2ca04efc11cabae4957930
|
[
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null |
graalpython/com.oracle.graal.python.test/src/tests/test_mro.py
|
qunaibit/graalpython
|
7676d10af92f5a7ddb2ca04efc11cabae4957930
|
[
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null |
graalpython/com.oracle.graal.python.test/src/tests/test_mro.py
|
qunaibit/graalpython
|
7676d10af92f5a7ddb2ca04efc11cabae4957930
|
[
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def test_class_attr_change():
class A(object):
counter = 0
for i in range(10):
A.counter += 1
assert A.counter == 10
def test_class_attr_deleted():
class A(object):
counter = 0
class B(A):
counter = 1
for i in range(10):
B.counter += 1
assert B.counter == 11
assert A.counter == 0
del B.counter
assert B.counter == 0
for i in range(10):
A.counter += 1
assert A.counter == 10
def test_class_attr_added():
class A(object):
counter = 0
class B(A):
pass
for i in range(10):
B.counter += 1
assert B.counter == 10
assert A.counter == 0
B.counter = 1
assert B.counter == 1
for i in range(10):
A.counter += 1
assert A.counter == 10
def test_class_attr_add_del():
class A:
foo = 1
class B(A):
foo = 2
class C(B):
foo = 3
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
assert C.foo == 10
del C.foo
assert C.foo == 2
del B.foo
assert C.foo == 1
B.foo = 5
assert C.foo == 5
C.foo = 10
assert C.foo == 10
| 26.89916 | 79 | 0.669166 |
1f3ca56cad56109b0d17580ee76564c9661e831f
| 502 |
py
|
Python
|
ExtractTable/config.py
|
rbm897/ExtractTable-py
|
cfb7d313d9e466f570c315e53eab673ea9926aab
|
[
"Apache-2.0"
] | null | null | null |
ExtractTable/config.py
|
rbm897/ExtractTable-py
|
cfb7d313d9e466f570c315e53eab673ea9926aab
|
[
"Apache-2.0"
] | null | null | null |
ExtractTable/config.py
|
rbm897/ExtractTable-py
|
cfb7d313d9e466f570c315e53eab673ea9926aab
|
[
"Apache-2.0"
] | null | null | null |
"""
Configure all Server request/response objects here
"""
class HOST:
"""API Endpoints of ExtractTable.com"""
VALIDATOR = 'validator.extracttable.com'
TRIGGER = 'trigger.extracttable.com'
RESULT = 'getresult.extracttable.com'
BIGFILE = 'bigfile.extracttable.com'
class JobStatus:
    """Job Status responses received from Server. Declared here to maintain consistency"""
SUCCESS = 'Success'
FAILED = 'Failed'
PROCESSING = 'Processing'
INCOMPLETE = 'Incomplete'
| 25.1 | 90 | 0.701195 |
6d88a2194a4f07a858c91228ecbf144667c5e138
| 1,622 |
py
|
Python
|
data_profiler/tests/labelers/test_integration_regex_data_labeler.py
|
taylorfturner/data-profiler
|
da416d1ccaed4b04d2e5b93da41a508de58b642e
|
[
"Apache-2.0"
] | 1 |
2021-02-13T21:53:16.000Z
|
2021-02-13T21:53:16.000Z
|
data_profiler/tests/labelers/test_integration_regex_data_labeler.py
|
taylorfturner/data-profiler
|
da416d1ccaed4b04d2e5b93da41a508de58b642e
|
[
"Apache-2.0"
] | null | null | null |
data_profiler/tests/labelers/test_integration_regex_data_labeler.py
|
taylorfturner/data-profiler
|
da416d1ccaed4b04d2e5b93da41a508de58b642e
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
import pkg_resources
import numpy as np
from data_profiler.labelers.data_labelers import BaseDataLabeler
default_labeler_dir = pkg_resources.resource_filename(
'resources', 'labelers'
)
class TestRegexDataLabeler(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.data = np.array(
['123 Fake St.', '1/2/2020', 'nice.', '4/3/22', 'abc',
'333-44-2341']
).reshape((-1,))
cls.data_labeler = BaseDataLabeler.load_from_disk(
os.path.join(default_labeler_dir, 'regex_model')
)
def test_default_model(self):
"""simple test of predict"""
data_labeler = self.data_labeler
# get char-level predictions on default model
model_predictions = data_labeler.predict(self.data)
final_results = model_predictions["pred"]
# for now just checking that it's not empty
self.assertIsNotNone(final_results)
self.assertEqual(len(self.data), len(final_results))
def test_default_confidences(self):
"""tests confidence scores output"""
data_labeler = self.data_labeler
# get char-level predictions/confidence scores on default model
results = data_labeler.predict(
self.data, predict_options=dict(show_confidences=True))
model_predictions_char_level, model_confidences_char_level = \
results["pred"], results["conf"]
# for now just checking that it's not empty
self.assertIsNotNone(model_confidences_char_level)
if __name__ == '__main__':
unittest.main()
| 28.45614 | 71 | 0.670777 |
2d1f05011776b530770e1aa3f50f1d4b66d1d596
| 2,138 |
py
|
Python
|
commands/profile.py
|
SSwiftGamer440/Synergile
|
22a0879f292b8fea056373633f9f956ae82fc609
|
[
"MIT"
] | 5 |
2020-05-25T20:30:54.000Z
|
2021-03-29T10:23:56.000Z
|
commands/profile.py
|
SSwiftGamer440/Synergile
|
22a0879f292b8fea056373633f9f956ae82fc609
|
[
"MIT"
] | 43 |
2020-05-24T21:11:07.000Z
|
2021-04-03T09:18:40.000Z
|
commands/profile.py
|
SSwiftGamer440/Synergile
|
22a0879f292b8fea056373633f9f956ae82fc609
|
[
"MIT"
] | 1 |
2020-05-23T00:04:10.000Z
|
2020-05-23T00:04:10.000Z
|
import math
import discord
from discord.ext import commands
from datetime import datetime
from datetime import timezone
from util.pyutil import buildMultiMatchString
from util.discordutil import resolveMember
class Profile(commands.Cog, name='Profile'):
def __init__(self, bot):
self.bot = bot
@commands.command(description="Gets information about a user and outputs it",usage='[user]')
async def profile(self, ctx, *, member=None):
if member is None:
#self profile
mem = ctx.guild.get_member(ctx.author.id)
else:
#resolve argument to a member
mem = await resolveMember(ctx, member)
if mem is None:
#return when input cannot be resolved
await ctx.send(f'You must provide a valid user reference: "{member}" could not be resolved to a user')
return
#generate profile embed and send
if(isinstance(mem, list)):
usersFound = buildMultiMatchString(self.bot.command_prefix, 'profile', mem, member)
await ctx.send(usersFound)
else:
embed = self.profileEmbed(ctx.message.author, mem)
await ctx.send(embed=embed)
#this should go in the Profile cog
def profileEmbed(self, author, mem):
#avoiding magic numbers
DISCORD_EPOCH = 1420070400000 #first second of 2015
userMilliseconds = int(mem.id/math.pow(2,22) + DISCORD_EPOCH)
embed = discord.Embed(title= mem.nick or mem.name, color= 0x00ff00, timestamp = datetime.now(timezone.utc))
embed.set_thumbnail(url=mem.avatar_url)
embed.add_field(name= "Username+Discrim:", value = f'{mem.name}#{mem.discriminator}', inline=False)
embed.add_field(name= "Highest role:", value = mem.top_role.name, inline=False)
embed.add_field(name= 'Is Bot?', value = 'Yes' if mem.bot else 'No', inline=False)
embed.add_field(name= 'Joined Discord:', value = datetime.utcfromtimestamp(int(userMilliseconds//1000)), inline=False)
embed.add_field(name= 'Joined the server at:', value = mem.joined_at.replace(microsecond=0), inline=False)
embed.add_field(name= "ID:", value = mem.id, inline= False)
embed.set_footer(text= f"Requested by {author}", icon_url=author.avatar_url)
return embed
def setup(bot):
bot.add_cog(Profile(bot))
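# Worked example of the snowflake math used in profileEmbed() above:
# creation time in ms = id / 2**22 + DISCORD_EPOCH. The ID below is hypothetical,
# chosen so its timestamp field is exactly one day past the Discord epoch.
if __name__ == "__main__":
    DISCORD_EPOCH = 1420070400000                     # first second of 2015, in ms
    fake_id = 86400000 * 2**22                        # hypothetical snowflake, low bits zero
    millis = int(fake_id / math.pow(2, 22) + DISCORD_EPOCH)
    print(datetime.utcfromtimestamp(millis // 1000))  # 2015-01-02 00:00:00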
| 39.592593 | 120 | 0.739008 |
8238ff17b248bc15ddfb8f8e62a4a6e5d0836234
| 1,000 |
py
|
Python
|
setup.py
|
nthdegreeburns/pysolr
|
b8c68d658de46084166b331d41491b681843b8b1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
nthdegreeburns/pysolr
|
b8c68d658de46084166b331d41491b681843b8b1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
nthdegreeburns/pysolr
|
b8c68d658de46084166b331d41491b681843b8b1
|
[
"BSD-3-Clause"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="pysolr",
version="3.5.0",
description="Lightweight python wrapper for Apache Solr.",
author='Daniel Lindsley',
author_email='[email protected]',
long_description=open('README.rst', 'r').read(),
py_modules=[
'pysolr'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
url='https://github.com/django-haystack/pysolr/',
license='BSD',
install_requires=[
'requests>=2.0'
],
extras_require={
'solrcloud': [
'kazoo==2.2'
]
}
)
| 26.315789 | 62 | 0.589 |
8af9fdb42c8e51bdee96875b8406a113383f474e
| 920 |
py
|
Python
|
src/zerohunger/accounts/migrations/0004_auto_20200531_2139.py
|
BuildForSDG/Team-250-Backends
|
1a81e20e3a01f909f26966070db5c9acc508838d
|
[
"MIT"
] | null | null | null |
src/zerohunger/accounts/migrations/0004_auto_20200531_2139.py
|
BuildForSDG/Team-250-Backends
|
1a81e20e3a01f909f26966070db5c9acc508838d
|
[
"MIT"
] | 16 |
2020-05-27T20:04:04.000Z
|
2021-09-22T19:07:34.000Z
|
src/zerohunger/accounts/migrations/0004_auto_20200531_2139.py
|
BuildForSDG/Team-250-Backends
|
1a81e20e3a01f909f26966070db5c9acc508838d
|
[
"MIT"
] | 1 |
2020-05-17T00:02:57.000Z
|
2020-05-17T00:02:57.000Z
|
# Generated by Django 3.0.6 on 2020-05-31 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20200528_2033'),
]
operations = [
migrations.AlterField(
model_name='user',
name='is_active',
field=models.BooleanField(
default=True,
help_text='Designates whether this user should be treated as'
' active. Unselect this instead of deleting accounts.',
verbose_name='active'),
),
migrations.AlterField(
model_name='user',
name='is_staff',
field=models.BooleanField(
default=False,
help_text='Designates whether the user '
'can log into this admin site.',
verbose_name='staff status'),
),
]
| 28.75 | 77 | 0.551087 |
4d2986de890c3b48ae0c87fff80715b096960c58
| 157 |
py
|
Python
|
fist_phase/0503_chicken.py
|
kapuni/exercise_py
|
b60ba8462d2545cae57483bcb0b3428b03c5d522
|
[
"MIT"
] | null | null | null |
fist_phase/0503_chicken.py
|
kapuni/exercise_py
|
b60ba8462d2545cae57483bcb0b3428b03c5d522
|
[
"MIT"
] | null | null | null |
fist_phase/0503_chicken.py
|
kapuni/exercise_py
|
b60ba8462d2545cae57483bcb0b3428b03c5d522
|
[
"MIT"
] | null | null | null |
for x in range(20):
for y in range(33):
z = 100 - x - y
if 5*x + 3*y + z/3 == 100:
            print("Roosters: %d, Hens: %d, Chicks: %d" % (x, y, z))
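# This is the classic "hundred chickens" puzzle: buy exactly 100 birds for 100 coins,
# where a rooster costs 5 coins, a hen costs 3 coins and chicks are 3 for 1 coin, i.e.
#     x + y + z == 100   and   5*x + 3*y + z/3 == 100
# The loop above prints the four integer solutions:
#     (x, y, z) = (0, 25, 75), (4, 18, 78), (8, 11, 81), (12, 4, 84)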
| 31.4 | 54 | 0.401274 |
c306a081db56bbc06f498efe1fcc66faa5a02dfd
| 775 |
py
|
Python
|
hello_python_source_py3/py2to3_converter.py
|
AnthonyBriggs/Python-101
|
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
|
[
"MIT"
] | 3 |
2017-08-02T23:40:55.000Z
|
2018-07-02T14:59:07.000Z
|
hello_python_source_py3/py2to3_converter.py
|
AnthonyBriggs/Python-101
|
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
|
[
"MIT"
] | null | null | null |
hello_python_source_py3/py2to3_converter.py
|
AnthonyBriggs/Python-101
|
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
|
[
"MIT"
] | null | null | null |
"""Script to mass-convert all the Python 2 files in a directory to Python 3."""
import os
import sys
try:
target = sys.argv[1]
if target.endswith('"'):
target = target[:-1]
except IndexError:
target = "."
print(target)
#sys.exit(1) # debug
for path, dirs, files in os.walk(target):
print(path, files, dirs)
for file_name in files:
print(file_name)
if not file_name.endswith('.py'):
continue
file_path = os.path.join(path, file_name)
print("Converting ", file_path)
#print("python -m lib2to3 -wn '" + file_path + "'")
# Double quotes are for Windows systems, which don't like
# file arguments with spaces :P
os.system('python -m lib2to3 -wn "' + file_path + '"')
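# Example invocation (illustrative): python py2to3_converter.py "C:\path\to\project"
# walks the given directory and rewrites every .py file in place via lib2to3
# ("-w" writes the changes back, "-n" skips creating .bak backups).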
| 26.724138 | 79 | 0.603871 |
1b3932b86052b68dd1bca488a0afc1f8c31f9328
| 43,862 |
py
|
Python
|
third_party/python/Lib/test/test_codeccallbacks.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/test_codeccallbacks.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/test_codeccallbacks.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
import codecs
import html.entities
import sys
import test.support
import unicodedata
import unittest
from encodings import raw_unicode_escape
class PosReturn:
# this can be used for configurable callbacks
def __init__(self):
self.pos = 0
def handle(self, exc):
oldpos = self.pos
realpos = oldpos
if realpos<0:
realpos = len(exc.object) + realpos
# if we don't advance this time, terminate on the next call
# otherwise we'd get an endless loop
if realpos <= exc.start:
self.pos = len(exc.object)
return ("<?>", oldpos)
# A UnicodeEncodeError object with a bad start attribute
class BadStartUnicodeEncodeError(UnicodeEncodeError):
def __init__(self):
UnicodeEncodeError.__init__(self, "ascii", "", 0, 1, "bad")
self.start = []
# A UnicodeEncodeError object with a bad object attribute
class BadObjectUnicodeEncodeError(UnicodeEncodeError):
def __init__(self):
UnicodeEncodeError.__init__(self, "ascii", "", 0, 1, "bad")
self.object = []
# A UnicodeDecodeError object without an end attribute
class NoEndUnicodeDecodeError(UnicodeDecodeError):
def __init__(self):
UnicodeDecodeError.__init__(self, "ascii", bytearray(b""), 0, 1, "bad")
del self.end
# A UnicodeDecodeError object with a bad object attribute
class BadObjectUnicodeDecodeError(UnicodeDecodeError):
def __init__(self):
UnicodeDecodeError.__init__(self, "ascii", bytearray(b""), 0, 1, "bad")
self.object = []
# A UnicodeTranslateError object without a start attribute
class NoStartUnicodeTranslateError(UnicodeTranslateError):
def __init__(self):
UnicodeTranslateError.__init__(self, "", 0, 1, "bad")
del self.start
# A UnicodeTranslateError object without an end attribute
class NoEndUnicodeTranslateError(UnicodeTranslateError):
def __init__(self):
UnicodeTranslateError.__init__(self, "", 0, 1, "bad")
del self.end
# A UnicodeTranslateError object without an object attribute
class NoObjectUnicodeTranslateError(UnicodeTranslateError):
def __init__(self):
UnicodeTranslateError.__init__(self, "", 0, 1, "bad")
del self.object
class CodecCallbackTest(unittest.TestCase):
def test_xmlcharrefreplace(self):
        # replace unencodable characters with numeric character entities.
# For ascii, latin-1 and charmaps this is completely implemented
# in C and should be reasonably fast.
s = "\u30b9\u30d1\u30e2 \xe4nd eggs"
self.assertEqual(
s.encode("ascii", "xmlcharrefreplace"),
b"スパモ änd eggs"
)
self.assertEqual(
s.encode("latin-1", "xmlcharrefreplace"),
b"スパモ \xe4nd eggs"
)
def test_xmlcharnamereplace(self):
# This time use a named character entity for unencodable
# characters, if one is available.
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
try:
l.append("&%s;" % html.entities.codepoint2name[ord(c)])
except KeyError:
l.append("&#%d;" % ord(c))
return ("".join(l), exc.end)
codecs.register_error(
"test.xmlcharnamereplace", xmlcharnamereplace)
sin = "\xab\u211c\xbb = \u2329\u1234\u20ac\u232a"
sout = b"«ℜ» = ⟨ሴ€⟩"
self.assertEqual(sin.encode("ascii", "test.xmlcharnamereplace"), sout)
sout = b"\xabℜ\xbb = ⟨ሴ€⟩"
self.assertEqual(sin.encode("latin-1", "test.xmlcharnamereplace"), sout)
sout = b"\xabℜ\xbb = ⟨ሴ\xa4⟩"
self.assertEqual(sin.encode("iso-8859-15", "test.xmlcharnamereplace"), sout)
def test_uninamereplace(self):
# We're using the names from the unicode database this time,
# and we're doing "syntax highlighting" here, i.e. we include
# the replaced text in ANSI escape sequences. For this it is
# useful that the error handler is not called for every single
# unencodable character, but for a complete sequence of
# unencodable characters, otherwise we would output many
# unnecessary escape sequences.
def uninamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
l.append(unicodedata.name(c, "0x%x" % ord(c)))
return ("\033[1m%s\033[0m" % ", ".join(l), exc.end)
codecs.register_error(
"test.uninamereplace", uninamereplace)
sin = "\xac\u1234\u20ac\u8000"
sout = b"\033[1mNOT SIGN, ETHIOPIC SYLLABLE SEE, EURO SIGN, CJK UNIFIED IDEOGRAPH-8000\033[0m"
self.assertEqual(sin.encode("ascii", "test.uninamereplace"), sout)
sout = b"\xac\033[1mETHIOPIC SYLLABLE SEE, EURO SIGN, CJK UNIFIED IDEOGRAPH-8000\033[0m"
self.assertEqual(sin.encode("latin-1", "test.uninamereplace"), sout)
sout = b"\xac\033[1mETHIOPIC SYLLABLE SEE\033[0m\xa4\033[1mCJK UNIFIED IDEOGRAPH-8000\033[0m"
self.assertEqual(sin.encode("iso-8859-15", "test.uninamereplace"), sout)
def test_backslashescape(self):
# Does the same as the "unicode-escape" encoding, but with different
# base encodings.
sin = "a\xac\u1234\u20ac\u8000\U0010ffff"
sout = b"a\\xac\\u1234\\u20ac\\u8000\\U0010ffff"
self.assertEqual(sin.encode("ascii", "backslashreplace"), sout)
sout = b"a\xac\\u1234\\u20ac\\u8000\\U0010ffff"
self.assertEqual(sin.encode("latin-1", "backslashreplace"), sout)
sout = b"a\xac\\u1234\xa4\\u8000\\U0010ffff"
self.assertEqual(sin.encode("iso-8859-15", "backslashreplace"), sout)
def test_nameescape(self):
# Does the same as backslashescape, but prefers ``\N{...}`` escape
# sequences.
sin = "a\xac\u1234\u20ac\u8000\U0010ffff"
sout = (b'a\\N{NOT SIGN}\\N{ETHIOPIC SYLLABLE SEE}\\N{EURO SIGN}'
b'\\N{CJK UNIFIED IDEOGRAPH-8000}\\U0010ffff')
self.assertEqual(sin.encode("ascii", "namereplace"), sout)
sout = (b'a\xac\\N{ETHIOPIC SYLLABLE SEE}\\N{EURO SIGN}'
b'\\N{CJK UNIFIED IDEOGRAPH-8000}\\U0010ffff')
self.assertEqual(sin.encode("latin-1", "namereplace"), sout)
sout = (b'a\xac\\N{ETHIOPIC SYLLABLE SEE}\xa4'
b'\\N{CJK UNIFIED IDEOGRAPH-8000}\\U0010ffff')
self.assertEqual(sin.encode("iso-8859-15", "namereplace"), sout)
def test_decoding_callbacks(self):
# This is a test for a decoding callback handler
# that allows the decoding of the invalid sequence
# "\xc0\x80" and returns "\x00" instead of raising an error.
# All other illegal sequences will be handled strictly.
def relaxedutf8(exc):
if not isinstance(exc, UnicodeDecodeError):
raise TypeError("don't know how to handle %r" % exc)
if exc.object[exc.start:exc.start+2] == b"\xc0\x80":
return ("\x00", exc.start+2) # retry after two bytes
else:
raise exc
codecs.register_error("test.relaxedutf8", relaxedutf8)
# all the "\xc0\x80" will be decoded to "\x00"
sin = b"a\x00b\xc0\x80c\xc3\xbc\xc0\x80\xc0\x80"
sout = "a\x00b\x00c\xfc\x00\x00"
self.assertEqual(sin.decode("utf-8", "test.relaxedutf8"), sout)
# "\xc0\x81" is not valid and a UnicodeDecodeError will be raised
sin = b"\xc0\x80\xc0\x81"
self.assertRaises(UnicodeDecodeError, sin.decode,
"utf-8", "test.relaxedutf8")
def test_charmapencode(self):
# For charmap encodings the replacement string will be
        # mapped through the encoding again. This means that
# to be able to use e.g. the "replace" handler, the
# charmap has to have a mapping for "?".
charmap = dict((ord(c), bytes(2*c.upper(), 'ascii')) for c in "abcdefgh")
sin = "abc"
sout = b"AABBCC"
self.assertEqual(codecs.charmap_encode(sin, "strict", charmap)[0], sout)
sin = "abcA"
self.assertRaises(UnicodeError, codecs.charmap_encode, sin, "strict", charmap)
charmap[ord("?")] = b"XYZ"
sin = "abcDEF"
sout = b"AABBCCXYZXYZXYZ"
self.assertEqual(codecs.charmap_encode(sin, "replace", charmap)[0], sout)
charmap[ord("?")] = "XYZ" # wrong type in mapping
self.assertRaises(TypeError, codecs.charmap_encode, sin, "replace", charmap)
def test_decodeunicodeinternal(self):
with test.support.check_warnings(('unicode_internal codec has been '
'deprecated', DeprecationWarning)):
self.assertRaises(
UnicodeDecodeError,
b"\x00\x00\x00\x00\x00".decode,
"unicode-internal",
)
if len('\0'.encode('unicode-internal')) == 4:
def handler_unicodeinternal(exc):
if not isinstance(exc, UnicodeDecodeError):
raise TypeError("don't know how to handle %r" % exc)
return ("\x01", 1)
self.assertEqual(
b"\x00\x00\x00\x00\x00".decode("unicode-internal", "ignore"),
"\u0000"
)
self.assertEqual(
b"\x00\x00\x00\x00\x00".decode("unicode-internal", "replace"),
"\u0000\ufffd"
)
self.assertEqual(
b"\x00\x00\x00\x00\x00".decode("unicode-internal", "backslashreplace"),
"\u0000\\x00"
)
codecs.register_error("test.hui", handler_unicodeinternal)
self.assertEqual(
b"\x00\x00\x00\x00\x00".decode("unicode-internal", "test.hui"),
"\u0000\u0001\u0000"
)
def test_callbacks(self):
def handler1(exc):
r = range(exc.start, exc.end)
if isinstance(exc, UnicodeEncodeError):
l = ["<%d>" % ord(exc.object[pos]) for pos in r]
elif isinstance(exc, UnicodeDecodeError):
l = ["<%d>" % exc.object[pos] for pos in r]
else:
raise TypeError("don't know how to handle %r" % exc)
return ("[%s]" % "".join(l), exc.end)
codecs.register_error("test.handler1", handler1)
def handler2(exc):
if not isinstance(exc, UnicodeDecodeError):
raise TypeError("don't know how to handle %r" % exc)
l = ["<%d>" % exc.object[pos] for pos in range(exc.start, exc.end)]
return ("[%s]" % "".join(l), exc.end+1) # skip one character
codecs.register_error("test.handler2", handler2)
s = b"\x00\x81\x7f\x80\xff"
self.assertEqual(
s.decode("ascii", "test.handler1"),
"\x00[<129>]\x7f[<128>][<255>]"
)
self.assertEqual(
s.decode("ascii", "test.handler2"),
"\x00[<129>][<128>]"
)
self.assertEqual(
b"\\u3042\\u3xxx".decode("unicode-escape", "test.handler1"),
"\u3042[<92><117><51>]xxx"
)
self.assertEqual(
b"\\u3042\\u3xx".decode("unicode-escape", "test.handler1"),
"\u3042[<92><117><51>]xx"
)
self.assertEqual(
codecs.charmap_decode(b"abc", "test.handler1", {ord("a"): "z"})[0],
"z[<98>][<99>]"
)
self.assertEqual(
"g\xfc\xdfrk".encode("ascii", "test.handler1"),
b"g[<252><223>]rk"
)
self.assertEqual(
"g\xfc\xdf".encode("ascii", "test.handler1"),
b"g[<252><223>]"
)
def test_longstrings(self):
# test long strings to check for memory overflow problems
errors = [ "strict", "ignore", "replace", "xmlcharrefreplace",
"backslashreplace", "namereplace"]
# register the handlers under different names,
# to prevent the codec from recognizing the name
for err in errors:
codecs.register_error("test." + err, codecs.lookup_error(err))
l = 1000
errors += [ "test." + err for err in errors ]
for uni in [ s*l for s in ("x", "\u3042", "a\xe4") ]:
for enc in ("ascii", "latin-1", "iso-8859-1", "iso-8859-15",
"utf-8", "utf-7", "utf-16", "utf-32"):
for err in errors:
try:
uni.encode(enc, err)
except UnicodeError:
pass
def check_exceptionobjectargs(self, exctype, args, msg):
# Test UnicodeError subclasses: construction, attribute assignment and __str__ conversion
# check with one missing argument
self.assertRaises(TypeError, exctype, *args[:-1])
# check with one argument too much
self.assertRaises(TypeError, exctype, *(args + ["too much"]))
# check with one argument of the wrong type
wrongargs = [ "spam", b"eggs", b"spam", 42, 1.0, None ]
for i in range(len(args)):
for wrongarg in wrongargs:
if type(wrongarg) is type(args[i]):
continue
# build argument array
callargs = []
for j in range(len(args)):
if i==j:
callargs.append(wrongarg)
else:
callargs.append(args[i])
self.assertRaises(TypeError, exctype, *callargs)
# check with the correct number and type of arguments
exc = exctype(*args)
self.assertEqual(str(exc), msg)
def test_unicodeencodeerror(self):
self.check_exceptionobjectargs(
UnicodeEncodeError,
["ascii", "g\xfcrk", 1, 2, "ouch"],
"'ascii' codec can't encode character '\\xfc' in position 1: ouch"
)
self.check_exceptionobjectargs(
UnicodeEncodeError,
["ascii", "g\xfcrk", 1, 4, "ouch"],
"'ascii' codec can't encode characters in position 1-3: ouch"
)
self.check_exceptionobjectargs(
UnicodeEncodeError,
["ascii", "\xfcx", 0, 1, "ouch"],
"'ascii' codec can't encode character '\\xfc' in position 0: ouch"
)
self.check_exceptionobjectargs(
UnicodeEncodeError,
["ascii", "\u0100x", 0, 1, "ouch"],
"'ascii' codec can't encode character '\\u0100' in position 0: ouch"
)
self.check_exceptionobjectargs(
UnicodeEncodeError,
["ascii", "\uffffx", 0, 1, "ouch"],
"'ascii' codec can't encode character '\\uffff' in position 0: ouch"
)
self.check_exceptionobjectargs(
UnicodeEncodeError,
["ascii", "\U00010000x", 0, 1, "ouch"],
"'ascii' codec can't encode character '\\U00010000' in position 0: ouch"
)
def test_unicodedecodeerror(self):
self.check_exceptionobjectargs(
UnicodeDecodeError,
["ascii", bytearray(b"g\xfcrk"), 1, 2, "ouch"],
"'ascii' codec can't decode byte 0xfc in position 1: ouch"
)
self.check_exceptionobjectargs(
UnicodeDecodeError,
["ascii", bytearray(b"g\xfcrk"), 1, 3, "ouch"],
"'ascii' codec can't decode bytes in position 1-2: ouch"
)
def test_unicodetranslateerror(self):
self.check_exceptionobjectargs(
UnicodeTranslateError,
["g\xfcrk", 1, 2, "ouch"],
"can't translate character '\\xfc' in position 1: ouch"
)
self.check_exceptionobjectargs(
UnicodeTranslateError,
["g\u0100rk", 1, 2, "ouch"],
"can't translate character '\\u0100' in position 1: ouch"
)
self.check_exceptionobjectargs(
UnicodeTranslateError,
["g\uffffrk", 1, 2, "ouch"],
"can't translate character '\\uffff' in position 1: ouch"
)
self.check_exceptionobjectargs(
UnicodeTranslateError,
["g\U00010000rk", 1, 2, "ouch"],
"can't translate character '\\U00010000' in position 1: ouch"
)
self.check_exceptionobjectargs(
UnicodeTranslateError,
["g\xfcrk", 1, 3, "ouch"],
"can't translate characters in position 1-2: ouch"
)
def test_badandgoodstrictexceptions(self):
# "strict" complains about a non-exception passed in
self.assertRaises(
TypeError,
codecs.strict_errors,
42
)
# "strict" complains about the wrong exception type
self.assertRaises(
Exception,
codecs.strict_errors,
Exception("ouch")
)
# If the correct exception is passed in, "strict" raises it
self.assertRaises(
UnicodeEncodeError,
codecs.strict_errors,
UnicodeEncodeError("ascii", "\u3042", 0, 1, "ouch")
)
self.assertRaises(
UnicodeDecodeError,
codecs.strict_errors,
UnicodeDecodeError("ascii", bytearray(b"\xff"), 0, 1, "ouch")
)
self.assertRaises(
UnicodeTranslateError,
codecs.strict_errors,
UnicodeTranslateError("\u3042", 0, 1, "ouch")
)
def test_badandgoodignoreexceptions(self):
# "ignore" complains about a non-exception passed in
self.assertRaises(
TypeError,
codecs.ignore_errors,
42
)
# "ignore" complains about the wrong exception type
self.assertRaises(
TypeError,
codecs.ignore_errors,
UnicodeError("ouch")
)
# If the correct exception is passed in, "ignore" returns an empty replacement
self.assertEqual(
codecs.ignore_errors(
UnicodeEncodeError("ascii", "a\u3042b", 1, 2, "ouch")),
("", 2)
)
self.assertEqual(
codecs.ignore_errors(
UnicodeDecodeError("ascii", bytearray(b"a\xffb"), 1, 2, "ouch")),
("", 2)
)
self.assertEqual(
codecs.ignore_errors(
UnicodeTranslateError("a\u3042b", 1, 2, "ouch")),
("", 2)
)
def test_badandgoodreplaceexceptions(self):
# "replace" complains about a non-exception passed in
self.assertRaises(
TypeError,
codecs.replace_errors,
42
)
# "replace" complains about the wrong exception type
self.assertRaises(
TypeError,
codecs.replace_errors,
UnicodeError("ouch")
)
self.assertRaises(
TypeError,
codecs.replace_errors,
BadObjectUnicodeEncodeError()
)
self.assertRaises(
TypeError,
codecs.replace_errors,
BadObjectUnicodeDecodeError()
)
# With the correct exception, "replace" returns an "?" or "\ufffd" replacement
self.assertEqual(
codecs.replace_errors(
UnicodeEncodeError("ascii", "a\u3042b", 1, 2, "ouch")),
("?", 2)
)
self.assertEqual(
codecs.replace_errors(
UnicodeDecodeError("ascii", bytearray(b"a\xffb"), 1, 2, "ouch")),
("\ufffd", 2)
)
self.assertEqual(
codecs.replace_errors(
UnicodeTranslateError("a\u3042b", 1, 2, "ouch")),
("\ufffd", 2)
)
def test_badandgoodxmlcharrefreplaceexceptions(self):
# "xmlcharrefreplace" complains about a non-exception passed in
self.assertRaises(
TypeError,
codecs.xmlcharrefreplace_errors,
42
)
# "xmlcharrefreplace" complains about the wrong exception types
self.assertRaises(
TypeError,
codecs.xmlcharrefreplace_errors,
UnicodeError("ouch")
)
# "xmlcharrefreplace" can only be used for encoding
self.assertRaises(
TypeError,
codecs.xmlcharrefreplace_errors,
UnicodeDecodeError("ascii", bytearray(b"\xff"), 0, 1, "ouch")
)
self.assertRaises(
TypeError,
codecs.xmlcharrefreplace_errors,
UnicodeTranslateError("\u3042", 0, 1, "ouch")
)
# Use the correct exception
cs = (0, 1, 9, 10, 99, 100, 999, 1000, 9999, 10000, 99999, 100000,
999999, 1000000)
cs += (0xd800, 0xdfff)
s = "".join(chr(c) for c in cs)
self.assertEqual(
codecs.xmlcharrefreplace_errors(
UnicodeEncodeError("ascii", "a" + s + "b",
1, 1 + len(s), "ouch")
),
("".join("&#%d;" % c for c in cs), 1 + len(s))
)
def test_badandgoodbackslashreplaceexceptions(self):
# "backslashreplace" complains about a non-exception passed in
self.assertRaises(
TypeError,
codecs.backslashreplace_errors,
42
)
# "backslashreplace" complains about the wrong exception types
self.assertRaises(
TypeError,
codecs.backslashreplace_errors,
UnicodeError("ouch")
)
# Use the correct exception
tests = [
("\u3042", "\\u3042"),
("\n", "\\x0a"),
("a", "\\x61"),
("\x00", "\\x00"),
("\xff", "\\xff"),
("\u0100", "\\u0100"),
("\uffff", "\\uffff"),
("\U00010000", "\\U00010000"),
("\U0010ffff", "\\U0010ffff"),
# Lone surrogates
("\ud800", "\\ud800"),
("\udfff", "\\udfff"),
("\ud800\udfff", "\\ud800\\udfff"),
]
for s, r in tests:
with self.subTest(str=s):
self.assertEqual(
codecs.backslashreplace_errors(
UnicodeEncodeError("ascii", "a" + s + "b",
1, 1 + len(s), "ouch")),
(r, 1 + len(s))
)
self.assertEqual(
codecs.backslashreplace_errors(
UnicodeTranslateError("a" + s + "b",
1, 1 + len(s), "ouch")),
(r, 1 + len(s))
)
tests = [
(b"a", "\\x61"),
(b"\n", "\\x0a"),
(b"\x00", "\\x00"),
(b"\xff", "\\xff"),
]
for b, r in tests:
with self.subTest(bytes=b):
self.assertEqual(
codecs.backslashreplace_errors(
UnicodeDecodeError("ascii", bytearray(b"a" + b + b"b"),
1, 2, "ouch")),
(r, 2)
)
def test_badandgoodnamereplaceexceptions(self):
# "namereplace" complains about a non-exception passed in
self.assertRaises(
TypeError,
codecs.namereplace_errors,
42
)
# "namereplace" complains about the wrong exception types
self.assertRaises(
TypeError,
codecs.namereplace_errors,
UnicodeError("ouch")
)
# "namereplace" can only be used for encoding
self.assertRaises(
TypeError,
codecs.namereplace_errors,
UnicodeDecodeError("ascii", bytearray(b"\xff"), 0, 1, "ouch")
)
self.assertRaises(
TypeError,
codecs.namereplace_errors,
UnicodeTranslateError("\u3042", 0, 1, "ouch")
)
# Use the correct exception
tests = [
("\u3042", "\\N{HIRAGANA LETTER A}"),
("\x00", "\\x00"),
("\ufbf9", "\\N{ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH "
"HAMZA ABOVE WITH ALEF MAKSURA ISOLATED FORM}"),
("\U000e007f", "\\N{CANCEL TAG}"),
("\U0010ffff", "\\U0010ffff"),
# Lone surrogates
("\ud800", "\\ud800"),
("\udfff", "\\udfff"),
("\ud800\udfff", "\\ud800\\udfff"),
]
for s, r in tests:
with self.subTest(str=s):
self.assertEqual(
codecs.namereplace_errors(
UnicodeEncodeError("ascii", "a" + s + "b",
1, 1 + len(s), "ouch")),
(r, 1 + len(s))
)
def test_badandgoodsurrogateescapeexceptions(self):
surrogateescape_errors = codecs.lookup_error('surrogateescape')
# "surrogateescape" complains about a non-exception passed in
self.assertRaises(
TypeError,
surrogateescape_errors,
42
)
# "surrogateescape" complains about the wrong exception types
self.assertRaises(
TypeError,
surrogateescape_errors,
UnicodeError("ouch")
)
# "surrogateescape" can not be used for translating
self.assertRaises(
TypeError,
surrogateescape_errors,
UnicodeTranslateError("\udc80", 0, 1, "ouch")
)
# Use the correct exception
for s in ("a", "\udc7f", "\udd00"):
with self.subTest(str=s):
self.assertRaises(
UnicodeEncodeError,
surrogateescape_errors,
UnicodeEncodeError("ascii", s, 0, 1, "ouch")
)
self.assertEqual(
surrogateescape_errors(
UnicodeEncodeError("ascii", "a\udc80b", 1, 2, "ouch")),
(b"\x80", 2)
)
self.assertRaises(
UnicodeDecodeError,
surrogateescape_errors,
UnicodeDecodeError("ascii", bytearray(b"a"), 0, 1, "ouch")
)
self.assertEqual(
surrogateescape_errors(
UnicodeDecodeError("ascii", bytearray(b"a\x80b"), 1, 2, "ouch")),
("\udc80", 2)
)
def test_badandgoodsurrogatepassexceptions(self):
surrogatepass_errors = codecs.lookup_error('surrogatepass')
# "surrogatepass" complains about a non-exception passed in
self.assertRaises(
TypeError,
surrogatepass_errors,
42
)
# "surrogatepass" complains about the wrong exception types
self.assertRaises(
TypeError,
surrogatepass_errors,
UnicodeError("ouch")
)
# "surrogatepass" can not be used for translating
self.assertRaises(
TypeError,
surrogatepass_errors,
UnicodeTranslateError("\ud800", 0, 1, "ouch")
)
# Use the correct exception
for enc in ("utf-8", "utf-16le", "utf-16be", "utf-32le", "utf-32be"):
with self.subTest(encoding=enc):
self.assertRaises(
UnicodeEncodeError,
surrogatepass_errors,
UnicodeEncodeError(enc, "a", 0, 1, "ouch")
)
self.assertRaises(
UnicodeDecodeError,
surrogatepass_errors,
UnicodeDecodeError(enc, "a".encode(enc), 0, 1, "ouch")
)
for s in ("\ud800", "\udfff", "\ud800\udfff"):
with self.subTest(str=s):
self.assertRaises(
UnicodeEncodeError,
surrogatepass_errors,
UnicodeEncodeError("ascii", s, 0, len(s), "ouch")
)
tests = [
("utf-8", "\ud800", b'\xed\xa0\x80', 3),
("utf-16le", "\ud800", b'\x00\xd8', 2),
("utf-16be", "\ud800", b'\xd8\x00', 2),
("utf-32le", "\ud800", b'\x00\xd8\x00\x00', 4),
("utf-32be", "\ud800", b'\x00\x00\xd8\x00', 4),
("utf-8", "\udfff", b'\xed\xbf\xbf', 3),
("utf-16le", "\udfff", b'\xff\xdf', 2),
("utf-16be", "\udfff", b'\xdf\xff', 2),
("utf-32le", "\udfff", b'\xff\xdf\x00\x00', 4),
("utf-32be", "\udfff", b'\x00\x00\xdf\xff', 4),
("utf-8", "\ud800\udfff", b'\xed\xa0\x80\xed\xbf\xbf', 3),
("utf-16le", "\ud800\udfff", b'\x00\xd8\xff\xdf', 2),
("utf-16be", "\ud800\udfff", b'\xd8\x00\xdf\xff', 2),
("utf-32le", "\ud800\udfff", b'\x00\xd8\x00\x00\xff\xdf\x00\x00', 4),
("utf-32be", "\ud800\udfff", b'\x00\x00\xd8\x00\x00\x00\xdf\xff', 4),
]
for enc, s, b, n in tests:
with self.subTest(encoding=enc, str=s, bytes=b):
self.assertEqual(
surrogatepass_errors(
UnicodeEncodeError(enc, "a" + s + "b",
1, 1 + len(s), "ouch")),
(b, 1 + len(s))
)
self.assertEqual(
surrogatepass_errors(
UnicodeDecodeError(enc, bytearray(b"a" + b[:n] + b"b"),
1, 1 + n, "ouch")),
(s[:1], 1 + n)
)
def test_badhandlerresults(self):
results = ( 42, "foo", (1,2,3), ("foo", 1, 3), ("foo", None), ("foo",), ("foo", 1, 3), ("foo", None), ("foo",) )
encs = ("ascii", "latin-1", "iso-8859-1", "iso-8859-15")
for res in results:
codecs.register_error("test.badhandler", lambda x: res)
for enc in encs:
self.assertRaises(
TypeError,
"\u3042".encode,
enc,
"test.badhandler"
)
for (enc, bytes) in (
("ascii", b"\xff"),
("utf-8", b"\xff"),
("utf-7", b"+x-"),
("unicode-internal", b"\x00"),
):
with test.support.check_warnings():
# unicode-internal has been deprecated
self.assertRaises(
TypeError,
bytes.decode,
enc,
"test.badhandler"
)
def test_lookup(self):
self.assertEqual(codecs.strict_errors, codecs.lookup_error("strict"))
self.assertEqual(codecs.ignore_errors, codecs.lookup_error("ignore"))
self.assertEqual(codecs.strict_errors, codecs.lookup_error("strict"))
self.assertEqual(
codecs.xmlcharrefreplace_errors,
codecs.lookup_error("xmlcharrefreplace")
)
self.assertEqual(
codecs.backslashreplace_errors,
codecs.lookup_error("backslashreplace")
)
self.assertEqual(
codecs.namereplace_errors,
codecs.lookup_error("namereplace")
)
def test_unencodablereplacement(self):
def unencrepl(exc):
if isinstance(exc, UnicodeEncodeError):
return ("\u4242", exc.end)
else:
raise TypeError("don't know how to handle %r" % exc)
codecs.register_error("test.unencreplhandler", unencrepl)
for enc in ("ascii", "iso-8859-1", "iso-8859-15"):
self.assertRaises(
UnicodeEncodeError,
"\u4242".encode,
enc,
"test.unencreplhandler"
)
def test_badregistercall(self):
# enhance coverage of:
# Modules/_codecsmodule.c::register_error()
# Python/codecs.c::PyCodec_RegisterError()
self.assertRaises(TypeError, codecs.register_error, 42)
self.assertRaises(TypeError, codecs.register_error, "test.dummy", 42)
def test_badlookupcall(self):
# enhance coverage of:
# Modules/_codecsmodule.c::lookup_error()
self.assertRaises(TypeError, codecs.lookup_error)
def test_unknownhandler(self):
# enhance coverage of:
# Modules/_codecsmodule.c::lookup_error()
self.assertRaises(LookupError, codecs.lookup_error, "test.unknown")
def test_xmlcharrefvalues(self):
# enhance coverage of:
# Python/codecs.c::PyCodec_XMLCharRefReplaceErrors()
# and inline implementations
v = (1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000,
500000, 1000000)
s = "".join([chr(x) for x in v])
codecs.register_error("test.xmlcharrefreplace", codecs.xmlcharrefreplace_errors)
for enc in ("ascii", "iso-8859-15"):
for err in ("xmlcharrefreplace", "test.xmlcharrefreplace"):
s.encode(enc, err)
def test_decodehelper(self):
# enhance coverage of:
# Objects/unicodeobject.c::unicode_decode_call_errorhandler()
# and callers
self.assertRaises(LookupError, b"\xff".decode, "ascii", "test.unknown")
def baddecodereturn1(exc):
return 42
codecs.register_error("test.baddecodereturn1", baddecodereturn1)
self.assertRaises(TypeError, b"\xff".decode, "ascii", "test.baddecodereturn1")
self.assertRaises(TypeError, b"\\".decode, "unicode-escape", "test.baddecodereturn1")
self.assertRaises(TypeError, b"\\x0".decode, "unicode-escape", "test.baddecodereturn1")
self.assertRaises(TypeError, b"\\x0y".decode, "unicode-escape", "test.baddecodereturn1")
self.assertRaises(TypeError, b"\\Uffffeeee".decode, "unicode-escape", "test.baddecodereturn1")
self.assertRaises(TypeError, b"\\uyyyy".decode, "raw-unicode-escape", "test.baddecodereturn1")
def baddecodereturn2(exc):
return ("?", None)
codecs.register_error("test.baddecodereturn2", baddecodereturn2)
self.assertRaises(TypeError, b"\xff".decode, "ascii", "test.baddecodereturn2")
handler = PosReturn()
codecs.register_error("test.posreturn", handler.handle)
# Valid negative position
handler.pos = -1
self.assertEqual(b"\xff0".decode("ascii", "test.posreturn"), "<?>0")
# Valid negative position
handler.pos = -2
self.assertEqual(b"\xff0".decode("ascii", "test.posreturn"), "<?><?>")
# Negative position out of bounds
handler.pos = -3
self.assertRaises(IndexError, b"\xff0".decode, "ascii", "test.posreturn")
# Valid positive position
handler.pos = 1
self.assertEqual(b"\xff0".decode("ascii", "test.posreturn"), "<?>0")
# Largest valid positive position (one beyond end of input)
handler.pos = 2
self.assertEqual(b"\xff0".decode("ascii", "test.posreturn"), "<?>")
# Invalid positive position
handler.pos = 3
self.assertRaises(IndexError, b"\xff0".decode, "ascii", "test.posreturn")
# Restart at the "0"
handler.pos = 6
self.assertEqual(b"\\uyyyy0".decode("raw-unicode-escape", "test.posreturn"), "<?>0")
class D(dict):
def __getitem__(self, key):
raise ValueError
self.assertRaises(UnicodeError, codecs.charmap_decode, b"\xff", "strict", {0xff: None})
self.assertRaises(ValueError, codecs.charmap_decode, b"\xff", "strict", D())
self.assertRaises(TypeError, codecs.charmap_decode, b"\xff", "strict", {0xff: sys.maxunicode+1})
def test_encodehelper(self):
# enhance coverage of:
# Objects/unicodeobject.c::unicode_encode_call_errorhandler()
# and callers
self.assertRaises(LookupError, "\xff".encode, "ascii", "test.unknown")
def badencodereturn1(exc):
return 42
codecs.register_error("test.badencodereturn1", badencodereturn1)
self.assertRaises(TypeError, "\xff".encode, "ascii", "test.badencodereturn1")
def badencodereturn2(exc):
return ("?", None)
codecs.register_error("test.badencodereturn2", badencodereturn2)
self.assertRaises(TypeError, "\xff".encode, "ascii", "test.badencodereturn2")
handler = PosReturn()
codecs.register_error("test.posreturn", handler.handle)
# Valid negative position
handler.pos = -1
self.assertEqual("\xff0".encode("ascii", "test.posreturn"), b"<?>0")
# Valid negative position
handler.pos = -2
self.assertEqual("\xff0".encode("ascii", "test.posreturn"), b"<?><?>")
# Negative position out of bounds
handler.pos = -3
self.assertRaises(IndexError, "\xff0".encode, "ascii", "test.posreturn")
# Valid positive position
handler.pos = 1
self.assertEqual("\xff0".encode("ascii", "test.posreturn"), b"<?>0")
        # Largest valid positive position (one beyond end of input)
handler.pos = 2
self.assertEqual("\xff0".encode("ascii", "test.posreturn"), b"<?>")
# Invalid positive position
handler.pos = 3
self.assertRaises(IndexError, "\xff0".encode, "ascii", "test.posreturn")
handler.pos = 0
class D(dict):
def __getitem__(self, key):
raise ValueError
for err in ("strict", "replace", "xmlcharrefreplace",
"backslashreplace", "namereplace", "test.posreturn"):
self.assertRaises(UnicodeError, codecs.charmap_encode, "\xff", err, {0xff: None})
self.assertRaises(ValueError, codecs.charmap_encode, "\xff", err, D())
self.assertRaises(TypeError, codecs.charmap_encode, "\xff", err, {0xff: 300})
def test_translatehelper(self):
# enhance coverage of:
# Objects/unicodeobject.c::unicode_encode_call_errorhandler()
# and callers
# (Unfortunately the errors argument is not directly accessible
# from Python, so we can't test that much)
class D(dict):
def __getitem__(self, key):
raise ValueError
#self.assertRaises(ValueError, "\xff".translate, D())
self.assertRaises(ValueError, "\xff".translate, {0xff: sys.maxunicode+1})
self.assertRaises(TypeError, "\xff".translate, {0xff: ()})
def test_bug828737(self):
charmap = {
ord("&"): "&",
ord("<"): "<",
ord(">"): ">",
ord('"'): """,
}
for n in (1, 10, 100, 1000):
text = 'abc<def>ghi'*n
text.translate(charmap)
def test_mutatingdecodehandler(self):
baddata = [
("ascii", b"\xff"),
("utf-7", b"++"),
("utf-8", b"\xff"),
("utf-16", b"\xff"),
("utf-32", b"\xff"),
("unicode-escape", b"\\u123g"),
("raw-unicode-escape", b"\\u123g"),
("unicode-internal", b"\xff"),
]
def replacing(exc):
if isinstance(exc, UnicodeDecodeError):
exc.object = 42
return ("\u4242", 0)
else:
raise TypeError("don't know how to handle %r" % exc)
codecs.register_error("test.replacing", replacing)
with test.support.check_warnings():
# unicode-internal has been deprecated
for (encoding, data) in baddata:
with self.assertRaises(TypeError):
data.decode(encoding, "test.replacing")
def mutating(exc):
if isinstance(exc, UnicodeDecodeError):
exc.object = b""
return ("\u4242", 0)
else:
raise TypeError("don't know how to handle %r" % exc)
codecs.register_error("test.mutating", mutating)
# If the decoder doesn't pick up the modified input the following
# will lead to an endless loop
with test.support.check_warnings():
# unicode-internal has been deprecated
for (encoding, data) in baddata:
self.assertEqual(data.decode(encoding, "test.mutating"), "\u4242")
# issue32583
def test_crashing_decode_handler(self):
        # generate one more character to fill the extra space slot
        # so that in a debug build it fails consistently
def forward_shorter_than_end(exc):
if isinstance(exc, UnicodeDecodeError):
# size one character, 0 < forward < exc.end
return ('\ufffd', exc.start+1)
else:
raise TypeError("don't know how to handle %r" % exc)
codecs.register_error(
"test.forward_shorter_than_end", forward_shorter_than_end)
self.assertEqual(
b'\xd8\xd8\xd8\xd8\xd8\x00\x00\x00'.decode(
'utf-16-le', 'test.forward_shorter_than_end'),
'\ufffd\ufffd\ufffd\ufffd\xd8\x00'
)
self.assertEqual(
b'\xd8\xd8\xd8\xd8\x00\xd8\x00\x00'.decode(
'utf-16-be', 'test.forward_shorter_than_end'),
'\ufffd\ufffd\ufffd\ufffd\xd8\x00'
)
self.assertEqual(
b'\x11\x11\x11\x11\x11\x00\x00\x00\x00\x00\x00'.decode(
'utf-32-le', 'test.forward_shorter_than_end'),
'\ufffd\ufffd\ufffd\u1111\x00'
)
self.assertEqual(
b'\x11\x11\x11\x00\x00\x11\x11\x00\x00\x00\x00'.decode(
'utf-32-be', 'test.forward_shorter_than_end'),
'\ufffd\ufffd\ufffd\u1111\x00'
)
def replace_with_long(exc):
if isinstance(exc, UnicodeDecodeError):
exc.object = b"\x00" * 8
return ('\ufffd', exc.start)
else:
raise TypeError("don't know how to handle %r" % exc)
codecs.register_error("test.replace_with_long", replace_with_long)
self.assertEqual(
b'\x00'.decode('utf-16', 'test.replace_with_long'),
'\ufffd\x00\x00\x00\x00'
)
self.assertEqual(
b'\x00'.decode('utf-32', 'test.replace_with_long'),
'\ufffd\x00\x00'
)
def test_fake_error_class(self):
handlers = [
codecs.strict_errors,
codecs.ignore_errors,
codecs.replace_errors,
codecs.backslashreplace_errors,
codecs.namereplace_errors,
codecs.xmlcharrefreplace_errors,
codecs.lookup_error('surrogateescape'),
codecs.lookup_error('surrogatepass'),
]
for cls in UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError:
class FakeUnicodeError(str):
__class__ = cls
for handler in handlers:
with self.subTest(handler=handler, error_class=cls):
self.assertRaises(TypeError, handler, FakeUnicodeError())
class FakeUnicodeError(Exception):
__class__ = cls
for handler in handlers:
with self.subTest(handler=handler, error_class=cls):
with self.assertRaises((TypeError, FakeUnicodeError)):
handler(FakeUnicodeError())
if __name__ == "__main__":
unittest.main()
| 38.988444 | 120 | 0.547946 |
d873d81111cc48c1055dc203f6cab674ffb91b88
| 12,417 |
py
|
Python
|
ppcls/arch/backbone/model_zoo/se_resnext.py
|
qili93/PaddleClas
|
21a89ee365613890b601001343a6bef2cbd99c2c
|
[
"Apache-2.0"
] | 2 |
2021-06-22T06:28:20.000Z
|
2021-06-22T06:28:23.000Z
|
ppcls/arch/backbone/model_zoo/se_resnext.py
|
sunjianfengHub/PaddleClas
|
dad9fa8b54da97691d2c7f2b6e0c2b4f077177b7
|
[
"Apache-2.0"
] | null | null | null |
ppcls/arch/backbone/model_zoo/se_resnext.py
|
sunjianfengHub/PaddleClas
|
dad9fa8b54da97691d2c7f2b6e0c2b4f077177b7
|
[
"Apache-2.0"
] | 1 |
2021-06-25T17:50:30.000Z
|
2021-06-25T17:50:30.000Z
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
import math
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"SE_ResNeXt50_32x4d": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_32x4d_pretrained.pdparams",
"SE_ResNeXt101_32x4d": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams",
"SE_ResNeXt152_64x4d": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt152_64x4d_pretrained.pdparams",
}
__all__ = list(MODEL_URLS.keys())
class ConvBNLayer(nn.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None,
data_format='NCHW'):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
data_format=data_format)
bn_name = name + '_bn'
self._batch_norm = BatchNorm(
num_filters,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance',
data_layout=data_format)
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class BottleneckBlock(nn.Layer):
def __init__(self,
num_channels,
num_filters,
stride,
cardinality,
reduction_ratio,
shortcut=True,
if_first=False,
name=None,
data_format="NCHW"):
super(BottleneckBlock, self).__init__()
self.conv0 = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters,
filter_size=1,
act='relu',
name='conv' + name + '_x1',
data_format=data_format)
self.conv1 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters,
filter_size=3,
groups=cardinality,
stride=stride,
act='relu',
name='conv' + name + '_x2',
data_format=data_format)
self.conv2 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters * 2 if cardinality == 32 else num_filters,
filter_size=1,
act=None,
name='conv' + name + '_x3',
data_format=data_format)
self.scale = SELayer(
num_channels=num_filters * 2 if cardinality == 32 else num_filters,
num_filters=num_filters * 2 if cardinality == 32 else num_filters,
reduction_ratio=reduction_ratio,
name='fc' + name,
data_format=data_format)
if not shortcut:
self.short = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters * 2
if cardinality == 32 else num_filters,
filter_size=1,
stride=stride,
name='conv' + name + '_prj',
data_format=data_format)
self.shortcut = shortcut
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
scale = self.scale(conv2)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = paddle.add(x=short, y=scale)
y = F.relu(y)
return y
class SELayer(nn.Layer):
def __init__(self, num_channels, num_filters, reduction_ratio, name=None, data_format="NCHW"):
super(SELayer, self).__init__()
self.data_format = data_format
self.pool2d_gap = AdaptiveAvgPool2D(1, data_format=self.data_format)
self._num_channels = num_channels
med_ch = int(num_channels / reduction_ratio)
stdv = 1.0 / math.sqrt(num_channels * 1.0)
self.squeeze = Linear(
num_channels,
med_ch,
weight_attr=ParamAttr(
initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"),
bias_attr=ParamAttr(name=name + '_sqz_offset'))
self.relu = nn.ReLU()
stdv = 1.0 / math.sqrt(med_ch * 1.0)
self.excitation = Linear(
med_ch,
num_filters,
weight_attr=ParamAttr(
initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"),
bias_attr=ParamAttr(name=name + '_exc_offset'))
self.sigmoid = nn.Sigmoid()
def forward(self, input):
pool = self.pool2d_gap(input)
if self.data_format == "NHWC":
pool = paddle.squeeze(pool, axis=[1, 2])
else:
pool = paddle.squeeze(pool, axis=[2, 3])
squeeze = self.squeeze(pool)
squeeze = self.relu(squeeze)
excitation = self.excitation(squeeze)
excitation = self.sigmoid(excitation)
if self.data_format == "NHWC":
excitation = paddle.unsqueeze(excitation, axis=[1, 2])
else:
excitation = paddle.unsqueeze(excitation, axis=[2, 3])
out = input * excitation
return out
class ResNeXt(nn.Layer):
def __init__(self, layers=50, class_dim=1000, cardinality=32, input_image_channel=3, data_format="NCHW"):
super(ResNeXt, self).__init__()
self.layers = layers
self.cardinality = cardinality
self.reduction_ratio = 16
self.data_format = data_format
self.input_image_channel = input_image_channel
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(
supported_layers, layers)
supported_cardinality = [32, 64]
assert cardinality in supported_cardinality, \
"supported cardinality is {} but input cardinality is {}" \
.format(supported_cardinality, cardinality)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_channels = [64, 256, 512, 1024]
num_filters = [128, 256, 512,
1024] if cardinality == 32 else [256, 512, 1024, 2048]
if layers < 152:
self.conv = ConvBNLayer(
num_channels=self.input_image_channel,
num_filters=64,
filter_size=7,
stride=2,
act='relu',
name="conv1",
data_format=self.data_format)
else:
self.conv1_1 = ConvBNLayer(
num_channels=self.input_image_channel,
num_filters=64,
filter_size=3,
stride=2,
act='relu',
name="conv1",
data_format=self.data_format)
self.conv1_2 = ConvBNLayer(
num_channels=64,
num_filters=64,
filter_size=3,
stride=1,
act='relu',
name="conv2",
data_format=self.data_format)
self.conv1_3 = ConvBNLayer(
num_channels=64,
num_filters=128,
filter_size=3,
stride=1,
act='relu',
name="conv3",
data_format=self.data_format)
self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1, data_format=self.data_format)
self.block_list = []
n = 1 if layers == 50 or layers == 101 else 3
for block in range(len(depth)):
n += 1
shortcut = False
for i in range(depth[block]):
bottleneck_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
BottleneckBlock(
num_channels=num_channels[block] if i == 0 else
num_filters[block] * int(64 // self.cardinality),
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=self.cardinality,
reduction_ratio=self.reduction_ratio,
shortcut=shortcut,
if_first=block == 0,
name=str(n) + '_' + str(i + 1),
data_format=self.data_format))
self.block_list.append(bottleneck_block)
shortcut = True
self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format)
self.pool2d_avg_channels = num_channels[-1] * 2
stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)
self.out = Linear(
self.pool2d_avg_channels,
class_dim,
weight_attr=ParamAttr(
initializer=Uniform(-stdv, stdv), name="fc6_weights"),
bias_attr=ParamAttr(name="fc6_offset"))
def forward(self, inputs):
with paddle.static.amp.fp16_guard():
if self.data_format == "NHWC":
inputs = paddle.tensor.transpose(inputs, [0, 2, 3, 1])
inputs.stop_gradient = True
if self.layers < 152:
y = self.conv(inputs)
else:
y = self.conv1_1(inputs)
y = self.conv1_2(y)
y = self.conv1_3(y)
y = self.pool2d_max(y)
for i, block in enumerate(self.block_list):
y = block(y)
y = self.pool2d_avg(y)
y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y)
return y
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def SE_ResNeXt50_32x4d(pretrained=False, use_ssld=False, **kwargs):
model = ResNeXt(layers=50, cardinality=32, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["SE_ResNeXt50_32x4d"], use_ssld=use_ssld)
return model
def SE_ResNeXt101_32x4d(pretrained=False, use_ssld=False, **kwargs):
model = ResNeXt(layers=101, cardinality=32, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["SE_ResNeXt101_32x4d"], use_ssld=use_ssld)
return model
def SE_ResNeXt152_64x4d(pretrained=False, use_ssld=False, **kwargs):
model = ResNeXt(layers=152, cardinality=64, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["SE_ResNeXt152_64x4d"], use_ssld=use_ssld)
return model
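# A minimal usage sketch, assuming paddle and the ppcls package are installed so the
# imports above resolve: build the 50-layer variant without pretrained weights and run
# one random image-sized batch through it.
if __name__ == "__main__":
    net = SE_ResNeXt50_32x4d(pretrained=False)
    dummy = paddle.rand([1, 3, 224, 224])
    print(net(dummy).shape)  # expected: [1, 1000] with the default class_dim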
| 36.201166 | 137 | 0.575099 |
340d5b281329c2be502e47998df22bfc7c3ebc54
| 9,652 |
py
|
Python
|
core/models/base_models/resnetv1b.py
|
achaiah/awesome-semantic-segmentation-pytorch
|
4f945a1989ae8b1bb6b24f1214fa84a7ca8c8e07
|
[
"Apache-2.0"
] | 1 |
2019-09-09T16:58:48.000Z
|
2019-09-09T16:58:48.000Z
|
core/models/base_models/resnetv1b.py
|
achaiah/awesome-semantic-segmentation-pytorch
|
4f945a1989ae8b1bb6b24f1214fa84a7ca8c8e07
|
[
"Apache-2.0"
] | null | null | null |
core/models/base_models/resnetv1b.py
|
achaiah/awesome-semantic-segmentation-pytorch
|
4f945a1989ae8b1bb6b24f1214fa84a7ca8c8e07
|
[
"Apache-2.0"
] | 1 |
2019-12-04T03:06:07.000Z
|
2019-12-04T03:06:07.000Z
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNetV1b', 'resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b',
'resnet101_v1b', 'resnet152_v1b', 'resnet152_v1s', 'resnet101_v1s', 'resnet50_v1s']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class BasicBlockV1b(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
previous_dilation=1, norm_layer=nn.BatchNorm2d):
super(BasicBlockV1b, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, 3, stride,
dilation, dilation, bias=False)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(True)
self.conv2 = nn.Conv2d(planes, planes, 3, 1, previous_dilation,
dilation=previous_dilation, bias=False)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class BottleneckV1b(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
previous_dilation=1, norm_layer=nn.BatchNorm2d):
super(BottleneckV1b, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = norm_layer(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, stride,
dilation, dilation, bias=False)
self.bn2 = norm_layer(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNetV1b(nn.Module):
def __init__(self, block, layers, num_classes=1000, dilated=True, deep_stem=False,
zero_init_residual=False, norm_layer=nn.BatchNorm2d):
self.inplanes = 128 if deep_stem else 64
super(ResNetV1b, self).__init__()
if deep_stem:
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, 3, 2, 1, bias=False),
norm_layer(64),
nn.ReLU(True),
nn.Conv2d(64, 64, 3, 1, 1, bias=False),
norm_layer(64),
nn.ReLU(True),
nn.Conv2d(64, 128, 3, 1, 1, bias=False)
)
else:
self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(True)
self.maxpool = nn.MaxPool2d(3, 2, 1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
if dilated:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, BottleneckV1b):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlockV1b):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False),
norm_layer(planes * block.expansion),
)
layers = []
if dilation in (1, 2):
layers.append(block(self.inplanes, planes, stride, dilation=1,
downsample=downsample, previous_dilation=dilation))
elif dilation == 4:
layers.append(block(self.inplanes, planes, stride, dilation=2,
downsample=downsample, previous_dilation=dilation))
else:
raise RuntimeError("=> unknown dilation size: {}".format(dilation))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18_v1b(pretrained=False, **kwargs):
model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], **kwargs)
if pretrained:
old_dict = model_zoo.load_url(model_urls['resnet18'])
model_dict = model.state_dict()
old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)}
model_dict.update(old_dict)
model.load_state_dict(model_dict)
return model
def resnet34_v1b(pretrained=False, **kwargs):
model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs)
if pretrained:
old_dict = model_zoo.load_url(model_urls['resnet34'])
model_dict = model.state_dict()
old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)}
model_dict.update(old_dict)
model.load_state_dict(model_dict)
return model
def resnet50_v1b(pretrained=False, **kwargs):
model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], **kwargs)
if pretrained:
old_dict = model_zoo.load_url(model_urls['resnet50'])
model_dict = model.state_dict()
old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)}
model_dict.update(old_dict)
model.load_state_dict(model_dict)
return model
def resnet101_v1b(pretrained=False, **kwargs):
model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs)
if pretrained:
old_dict = model_zoo.load_url(model_urls['resnet101'])
model_dict = model.state_dict()
old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)}
model_dict.update(old_dict)
model.load_state_dict(model_dict)
return model
def resnet152_v1b(pretrained=False, **kwargs):
model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], **kwargs)
if pretrained:
old_dict = model_zoo.load_url(model_urls['resnet152'])
model_dict = model.state_dict()
old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)}
model_dict.update(old_dict)
model.load_state_dict(model_dict)
return model
def resnet50_v1s(pretrained=False, root='~/.torch/models', **kwargs):
model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, **kwargs)
if pretrained:
from ..model_store import get_resnet_file
model.load_state_dict(torch.load(get_resnet_file('resnet50', root=root)), strict=False)
return model
def resnet101_v1s(pretrained=False, root='~/.torch/models', **kwargs):
model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, **kwargs)
if pretrained:
from ..model_store import get_resnet_file
model.load_state_dict(torch.load(get_resnet_file('resnet101', root=root)), strict=False)
return model
def resnet152_v1s(pretrained=False, root='~/.torch/models', **kwargs):
model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], deep_stem=True, **kwargs)
if pretrained:
from ..model_store import get_resnet_file
model.load_state_dict(torch.load(get_resnet_file('resnet152', root=root)), strict=False)
return model
if __name__ == '__main__':
import torch
img = torch.randn(4, 3, 224, 224)
model = resnet50_v1b(True)
output = model(img)
| 36.560606 | 110 | 0.612516 |
fe536215881302676ce4056998bfa8b5e33f225b
| 6,145 |
py
|
Python
|
common/hobbies.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 34 |
2021-08-18T14:51:44.000Z
|
2022-03-10T14:14:48.000Z
|
common/hobbies.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 27 |
2021-08-30T14:42:09.000Z
|
2022-03-17T22:11:45.000Z
|
common/hobbies.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 40 |
2021-08-22T07:13:32.000Z
|
2022-03-29T11:45:32.000Z
|
import re
HOBBIES_RE = re.compile(
r"\b(Acroyoga|Acting|Aerial silk|Airbrushing|Amateur radio"
"|Animation|Aquascaping|Art|Astrology|Babysitting|Baking"
"|Basketball|Baton twirling|Beatboxing|Beer sommelier"
"|Beer tasting|Binge-watching|Blogging|Board game|Board games"
"|Book discussion club|Book restoration|Bowling|Brazilian jiu-jitsu|"
"Breadmaking|Building|Bullet Journal|Bullet journaling|Calligraphy"
"|Candle making|Candy making|Car|Car fixing|Card games|Cardistry"
"|Ceramics|Online chat|Chat|Cheesemaking|Chess|Cleaning|Clothing"
"|Clothesmaking|Home roasting coffee|Coffee roasting|Collecting"
"|Coloring book|Coloring|Communication|Community activism|Computer programming"
"|Confectionery|Construction|Cooking|Cosplaying|Couch surfing|Couponing|Craft"
"|Creative writing|Crocheting|Cross-stitch|Crossword puzzles|Cryptography"
"|Cue sports|Dance|Decorative arts|Decorating|Digital art|Dining|Diorama"
"|Distro-hopping|Distro Hopping|Diving|Djembe|Disc-jockey|DJing|Do it yourself"
"|Drama|Drawing|Mixed drink|Drink mixing|Drinking|Electronic games|Electronics"
"|Embroidery|Engraving|Entertaining|Experimenting|Fantasy sport|Fashion"
"|Fashion design|Feng shui|Feng shui decorating|Filmmaking|Fingerpainting"
"|Fishfarming|Fishkeeping|Flower arranging|Fly tying|Second-language acquisition"
"|Foreign language learning|Furniture|Furniture building|Games|Gaming|Genealogy"
"|Gingerbread house|Gingerbread house making|Giving advice|Glassblowing|Gardening"
"|Gongfu tea|Graphic design|Gunsmithing|Gymnastics|Hacker|Hacking|Computer hardware"
"|Hardware|Herpetoculture|Herp keeping|Home improvement|Homebrewing|Houseplant care"
"|Hula hooping|Humor|Hydroponics|Ice skating|Invention|Inventing|Jewelry making"
"|Jigsaw puzzle|Diary|Journaling|Juggling|Karaoke|Karate|Kendama|Knife making|Knitting"
"|Knot tying|Kombucha|Kombucha brewing|Kung fu|Lace making|Lapidary|Leather crafting"
"|Lego|Lego building|Livestreaming|Music|Listening to music|Podcasts"
"|Listening to podcasts|Lock picking|Machining|Macrame|Magic (illusion)|Magic"
"|Makeup|Massaging|Maze|Mechanics|Meditation|Memory training|Metalworking"
"|Miniature art|Simple living|Minimalism|Model building|Model engineering"
"|Music|Nail art|Needlepoint|Origami|Painting|Palmistry|Performance|Pet|"
"Pet adoption|Pet fostering|Pet sitting|Philately|Photography|Pilates|Planning"
"|Plastic art|Music|Playing musical instruments|Poetry|Poi|Pole dancing|Postcrossing"
"|Pottery|Powerlifting|Practical jokes|Pressed flower craft|Proofreading|Proofreading"
"|Proverbs|Public speaking|Puppetry|Puzzle|Pyrography|Quilling|Quilting|Quizzes"
"|Radio-controlled model|Radio-controlled model playing|Rail transport modeling|Rapping"
"|Reading|Recipe|Recipe creation|Refinishing|Reiki|Gadget|Reviewing Gadgets|Robot combat|"
"Rubik|Scrapbooking|Scuba Diving|Sculpting|Sewing|Shoemaking|Singing|Sketch"
"|Skipping rope|Slot car|Soapmaking|Social media|Spreadsheets|Stamp collecting"
"|Stand-up comedy|Storytelling|Striptease|Stripping|Sudoku|Table tennis"
"|Talking|Tapestry|Tarot|Tatebanko|Tattoo|Tattooing|Taxidermy|joke|Telling jokes"
"|Charity shop|Thrifting|Upcycling|Video editing|Video game development"
"|Video game developing|Video gaming|Videography|Video making|Virtual Reality"
"|VR Gaming|Wargaming|Watchmaker|Watch making|Documentary film|Watching documentaries"
"|Movies|Watching movies|Television program|Watching television|Sealing wax|Wax sealing"
"|Waxing|Weaving|Webtoon|Weight training|Welding|Whisky|Whittling|Wikipedia editing"
"|Wine tasting|Winemaking|Witchcraft|Wood carving|Woodworking|Word searches"
"|Worldbuilding|Writing|Musical composition|Writing music|Yo-yoing|Yoga|Zumba"
"|Air sports|Airsoft|Amateur geology|Amusement park|Amusement park visiting|Archery"
"|Auto detailing|Automobilism|Astronomy|Backpacking|Badminton|BASE jumping|Baseball"
"|Basketball|Beachcombing|Beekeeping|Birdwatching|Blacksmithing|BMX|Board sports"
"|Bodybuilding|Bonsai|Butterfly watching|Bus|Bus riding|Camping|Canoeing|Canyoning"
"|Car|Car riding|Car tuning|Caving|City tourism|City trip|Climbing|Composting"
"|Cycling|Dandy|Dandyism|Dog training|Dog walking|Dowsing|Driving|Farming"
"|Fishing|Flag football|Flower|Flower growing|Aviation|Flying|Flying disc"
"|Model aircraft|Flying model planes|Foraging|Fossicking|Freestyle football"
"|Fruit picking|Gardening|Geocaching|Ghost hunting|Gold prospecting|Graffiti"
"|Groundhopping|Guerrilla gardening|Handball|Herbalism|Herping|High-power rocketry"
"|Hiking|Hobby horse|Hobby horsing|Hobby tunneling|Hooping|Horseback riding"
"|Hunting|Inline skating|Jogging|Jumping rope|Karting|Kayaking|Kite|Kite flying"
"|Kitesurfing|Lacrosse|LARPing|Letterboxing|Lomography|Longboarding|Martial arts"
"|Metal detector|Metal detecting|Motorcycling|Meteorology|Motor sports|Mountain biking"
"|Mountaineering|Museum|Museum visiting|Mushroom hunting|Netball|Noodling"
"|Nordic skating|Orienteering|Paintball|Paragliding|Parkour|Photography|Picnicking"
"|Podcast|Podcast hosting|Polo|Public transport|Public transport riding|Qigong"
"|Radio-controlled model|Radio-controlled model playing|Rafting|Railway|Railway journeys"
"|Rappelling|Renaissance fair|Renovating|Road Cycling|Road biking|Rock climbing"
"|Rock art|Rock painting|Roller skating|Rugby football|Rugby|Running|Sailing"
"|Sand art|Scouting|Scuba diving|Sculling|Shooting|Shopping|Shuffleboard"
"|Skateboarding|Skiing|Skimboarding|Skydiving|Slacklining|sled|Sledding|Snorkeling"
"|Snowboarding|Snowmobiling|Snowshoeing|Soccer|Stone skipping|Storm chasing|Sun bathing"
"|Surfing|Survivalism|Human swimming|Swimming|Taekwondo|Tai chi|Tennis"
"|Thru-hiking|Topiary|Tourism|Trade fair|Trade fair visiting|Travel|Unicycle"
"|Unicycling|Urban exploration|Vacation|Vegetable farming|Vehicle restoration"
"|Videography|Volunteering|Walking|Water sports|Zoo|Zoo visiting)\b",
re.IGNORECASE,
)
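# A minimal usage sketch, assuming this module is importable (e.g. as common.hobbies);
# the sample sentence is illustrative only.
if __name__ == "__main__":
    sample = "On weekends I enjoy hiking, baking bread and playing chess."
    print(HOBBIES_RE.findall(sample))  # case-insensitive matches, e.g. hiking, baking, chess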
| 76.8125 | 94 | 0.799512 |
8626dded68250016d34f68569199572890b81bb5
| 1,056 |
py
|
Python
|
Course_1/Week_4/list_comprehensions.py
|
internetworksio/Google-ITAutomation-Python
|
6027750a33e8df883d762223bb0c4a5a95395bc0
|
[
"MIT"
] | 2 |
2021-03-23T16:02:32.000Z
|
2022-03-13T09:32:56.000Z
|
Course_1/Week_4/list_comprehensions.py
|
internetworksio/Google-ITAutomation-Python
|
6027750a33e8df883d762223bb0c4a5a95395bc0
|
[
"MIT"
] | null | null | null |
Course_1/Week_4/list_comprehensions.py
|
internetworksio/Google-ITAutomation-Python
|
6027750a33e8df883d762223bb0c4a5a95395bc0
|
[
"MIT"
] | 7 |
2021-01-14T05:39:54.000Z
|
2022-03-13T09:33:01.000Z
|
"""
This script is used for course notes.
Author: Erick Marin
Date: 10/20/2020
"""
multiples = []
for x in range(1, 11):
multiples.append(x*7)
print(multiples)
# List comprehension method
# Let us create new lists based on sequences or ranges
multiples = [x * 7 for x in range(1, 11)]
print(multiples)
languages = ["Python", "Perl", "Ruby", "Go", "Java", "C"]
lengths = [len(language) for language in languages]
print(lengths)
z = [x for x in range(0, 101) if x % 3 == 0]
print(z)
# The odd_numbers function returns a list of odd numbers between 1 and n,
# inclusively. Fill in the blanks in the function, using list comprehension.
# Hint: remember that list and range counters start at 0 and end at the limit
# minus 1.
def odd_numbers(n):
return [x for x in range(0, n + 1) if x % 2 != 0]
print(odd_numbers(5)) # Should print [1, 3, 5]
print(odd_numbers(10)) # Should print [1, 3, 5, 7, 9]
print(odd_numbers(11)) # Should print [1, 3, 5, 7, 9, 11]
print(odd_numbers(1)) # Should print [1]
print(odd_numbers(-1)) # Should print []
| 25.756098 | 77 | 0.673295 |
1dc9c5cba0062cf70ffb672c5ade6c7c7b3b8f35
| 1,721 |
py
|
Python
|
qr.py
|
chenyuyou/qr_website
|
7124013089591217e2b14a3b239656bbe5cd1254
|
[
"MIT"
] | null | null | null |
qr.py
|
chenyuyou/qr_website
|
7124013089591217e2b14a3b239656bbe5cd1254
|
[
"MIT"
] | null | null | null |
qr.py
|
chenyuyou/qr_website
|
7124013089591217e2b14a3b239656bbe5cd1254
|
[
"MIT"
] | null | null | null |
import qrcode
from MyQR import myqr
from PIL import Image
def QR_With_Central_Img(link="http://192.168.50.126:8000", central_picture="BackgroudIMG.png", output_file="output_code.png"):
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_H, box_size=10, border=2)
content = link
qr.add_data(content)
qr.make(fit=True)
img=qr.make_image()
img=img.convert("RGBA")
icon = Image.open(central_picture)
img_w, img_h = img.size
factor = 4
size_w = int(img_w / factor)
size_h = int(img_h / factor)
icon_w, icon_h = icon.size
if icon_w > size_w:
icon_w = size_w
if icon_h > size_h:
icon_h = size_h
icon = icon.resize((icon_w, icon_h), Image.ANTIALIAS)
w = int((img_w - icon_w)/2)
h = int((img_h - icon_h)/2)
icon = icon.convert("RGBA")
img.paste(icon, (w,h), icon)
img.save(output_file)
def QR_With_FullBackgroud_Img(link="http://192.168.50.126:8000", backgroud_picture="BackgroudIMG.png", output_file="output_code.png"):
myqr.run(
words = link,
version=1,
level="H",
picture=backgroud_picture,
colorized=True,
contrast=1.0,
brightness=1.0,
save_name=output_file
)
def QR_Single_Code(link="http://192.168.50.126:8000", output_file="output_code.png"):
qr = qrcode.QRCode(version=1, box_size=10, border=2)
content = link
qr.add_data(content)
qr.make(fit=True)
img=qr.make_image()
img.save(output_file)
if __name__ == '__main__':
# QR_Single_Code(link="http://192.168.50.126:8000",output_file="output_file.png")
# QR_With_Central_Img(link="http://192.168.50.126:8000", central_picture="1.jpg", output_file="output_file.png")
QR_With_FullBackgroud_Img(link="http://192.168.50.126:8000", backgroud_picture="1.jpg", output_file="output_file.png")
| 27.758065 | 134 | 0.727484 |
9f4c443cbb65e4e69d441e00dc0026e3ac82ea9b
| 1,490 |
py
|
Python
|
isurveyAPI.py
|
kalcanfor/chatbot-iesb
|
270bd635e8c4f651e8d1593a368bcb13825de53a
|
[
"MIT"
] | null | null | null |
isurveyAPI.py
|
kalcanfor/chatbot-iesb
|
270bd635e8c4f651e8d1593a368bcb13825de53a
|
[
"MIT"
] | null | null | null |
isurveyAPI.py
|
kalcanfor/chatbot-iesb
|
270bd635e8c4f651e8d1593a368bcb13825de53a
|
[
"MIT"
] | 1 |
2021-11-15T18:41:32.000Z
|
2021-11-15T18:41:32.000Z
|
import requests
import json
import pandas as pd
from unicodedata import normalize
import base64
class ApiISurvey:
def __init__(self, username, password):
self.urltoken = "http://portalh.cgee.org.br:6443/cas/v1/tickets?service=https://isurvey.cgee.org.br/iSurveyConfigurador/"
self.payload = {
"username": username,
"password": base64.b64decode(password),
"token": True
}
self.headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
self.headersAuth = ""
def Authenticate(self):
result = False
response = requests.request("POST", self.urltoken, headers=self.headers, data=self.payload, verify=False)
if(response.status_code==201):
self.headersAuth = {
"X-Access-Token": response.text
}
else:
print(response)
return(self.headersAuth)
def get_pessoa_by_email(self, email):
endpoint_buscaPessoa = "https://isurvey.cgee.org.br/iSurveyConfApi/api/configurador/v1/secure/buscaPessoa?page=0&pageSize=10&filter="+str(email)
result = requests.request("GET", endpoint_buscaPessoa, headers=self.headersAuth, verify=False)
if(result.status_code == 200):
data = result.json()
else:
print("Response: ", result.status_code)
print("Error: ",result.text)
data = []
return(data)
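# A minimal usage sketch; the credentials and e-mail below are placeholders, and the
# password must be base64-encoded because __init__ passes it through base64.b64decode.
if __name__ == "__main__":
    api = ApiISurvey("some.user", "cGFzc3dvcmQ=")  # hypothetical account
    if api.Authenticate():
        print(api.get_pessoa_by_email("someone@example.org"))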
| 35.47619 | 152 | 0.604027 |
b7b7133065972ee3fab2e7c9830d780f745a01d3
| 2,088 |
py
|
Python
|
collections/ansible_collections/ansible/tower/plugins/modules/tower_license.py
|
hindman-redhat/automated-smart-management-2
|
5450ccd71f2a4ba568a7f11b03466e1554ae0087
|
[
"MIT"
] | null | null | null |
collections/ansible_collections/ansible/tower/plugins/modules/tower_license.py
|
hindman-redhat/automated-smart-management-2
|
5450ccd71f2a4ba568a7f11b03466e1554ae0087
|
[
"MIT"
] | null | null | null |
collections/ansible_collections/ansible/tower/plugins/modules/tower_license.py
|
hindman-redhat/automated-smart-management-2
|
5450ccd71f2a4ba568a7f11b03466e1554ae0087
|
[
"MIT"
] | 2 |
2021-03-30T14:26:02.000Z
|
2021-04-01T18:17:29.000Z
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2019, John Westcott IV <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_license
author: "John Westcott IV (@john-westcott-iv)"
short_description: Set the license for Ansible Tower
description:
- Get or Set Ansible Tower license. See
U(https://www.ansible.com/tower) for an overview.
options:
manifest:
description:
- file path to a Red Hat subscription manifest (a .zip file)
required: True
type: str
eula_accepted:
description:
- Whether or not the EULA is accepted.
required: True
type: bool
extends_documentation_fragment: ansible.tower.auth
'''
RETURN = ''' # '''
EXAMPLES = '''
- name: Set the license using a file
tower_license:
manifest: "/tmp/my_manifest.zip"
eula_accepted: True
'''
import base64
from ..module_utils.tower_api import TowerAPIModule
def main():
module = TowerAPIModule(
argument_spec=dict(
manifest=dict(type='str', required=True),
eula_accepted=dict(type='bool', required=True),
),
)
json_output = {'changed': True}
if not module.params.get('eula_accepted'):
module.fail_json(msg='You must accept the EULA by passing in the param eula_accepted as True')
try:
manifest = base64.b64encode(
open(module.params.get('manifest'), 'rb').read()
)
except OSError as e:
module.fail_json(msg=str(e))
# Deal with check mode
if module.check_mode:
module.exit_json(**json_output)
module.post_endpoint('config', data={
'eula_accepted': True,
'manifest': manifest.decode()
})
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| 24.564706 | 102 | 0.64272 |
30ebe58e0893fdac61603d5336538939297690f5
| 209 |
py
|
Python
|
train/siamese/utilities/__init__.py
|
openem-team/openem
|
45222c9c77084eacab278da25a8734ae7d43f677
|
[
"MIT"
] | 10 |
2019-01-23T23:58:01.000Z
|
2021-08-30T19:42:35.000Z
|
train/siamese/utilities/__init__.py
|
openem-team/openem
|
45222c9c77084eacab278da25a8734ae7d43f677
|
[
"MIT"
] | 3 |
2020-03-20T15:21:41.000Z
|
2020-09-18T18:49:38.000Z
|
train/siamese/utilities/__init__.py
|
openem-team/openem
|
45222c9c77084eacab278da25a8734ae7d43f677
|
[
"MIT"
] | 2 |
2020-05-08T17:39:12.000Z
|
2020-10-09T01:27:17.000Z
|
from utilities.utilities import *
from utilities.track_data import *
from utilities.class_data import *
from utilities.count_data import *
from utilities.model_data import *
from utilities.detid_data import *
| 29.857143 | 34 | 0.827751 |
db0df3c0f0e8081463caf2869cdb2901b289fdb7
| 1,042 |
py
|
Python
|
logtools/__init__.py
|
adamhadani/logtools
|
0b47662179b6d29be5629702e1eaddefb2b281fb
|
[
"Apache-2.0"
] | 100 |
2015-01-02T18:00:18.000Z
|
2022-01-12T09:39:24.000Z
|
logtools/__init__.py
|
adamhadani/logtools
|
0b47662179b6d29be5629702e1eaddefb2b281fb
|
[
"Apache-2.0"
] | 2 |
2015-09-16T22:08:22.000Z
|
2021-06-12T20:46:05.000Z
|
logtools/__init__.py
|
adamhadani/logtools
|
0b47662179b6d29be5629702e1eaddefb2b281fb
|
[
"Apache-2.0"
] | 25 |
2015-01-02T18:00:21.000Z
|
2022-03-09T05:47:11.000Z
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.basicConfig(
level = logging.INFO,
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
from _config import *
from _filterbots import *
from _flattenjson import *
from _geoip import *
from _join import *
from _merge import *
from _parse import *
from _urlparse import *
from _plot import *
from _qps import *
from _sample import *
from _filter import *
from _tail import *
from _sumstat import *
from _serve import *
| 27.421053 | 75 | 0.738004 |
f58cf320454e1f144f7f53bb89a10b2316fceec9
| 1,852 |
py
|
Python
|
configs/ocrnet/fcn_hr48_512x1024_40k_b16_rmi_cityscapes.py
|
openseg-group/mmsegmentation
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
[
"Apache-2.0"
] | 2 |
2020-07-10T12:13:56.000Z
|
2020-11-09T07:09:29.000Z
|
configs/ocrnet/fcn_hr48_512x1024_40k_b16_rmi_cityscapes.py
|
openseg-group/mmsegmentation
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
[
"Apache-2.0"
] | null | null | null |
configs/ocrnet/fcn_hr48_512x1024_40k_b16_rmi_cityscapes.py
|
openseg-group/mmsegmentation
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
[
"Apache-2.0"
] | 2 |
2020-07-28T09:12:55.000Z
|
2021-01-04T07:49:59.000Z
|
_base_ = [
'../_base_/datasets/cityscapes.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
    # use the w48 checkpoint to match the 48/96/192/384 branch widths configured below
    pretrained='open-mmlab://msra/hrnetv2_w48',
backbone=dict(
type='HRNet',
norm_cfg=norm_cfg,
norm_eval=False,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384)))),
decode_head=dict(
type='FCNHead',
in_channels=[48, 96, 192, 384],
channels=sum([48, 96, 192, 384]),
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
kernel_size=1,
num_convs=1,
concat_input=False,
dropout_ratio=-1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='RMILoss', num_classes=19, loss_weight=1.0))
)
optimizer = dict(lr=0.02)
lr_config = dict(min_lr=2e-4)
data = dict(samples_per_gpu=2, workers_per_gpu=2)
# model training and testing settings
train_cfg = dict()
test_cfg = dict(mode='whole')
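# A usage sketch, assuming the standard mmsegmentation repository layout (tools/train.py
# and tools/dist_train.sh at the repository root); the GPU count is illustrative:
#   python tools/train.py configs/ocrnet/fcn_hr48_512x1024_40k_b16_rmi_cityscapes.py
#   ./tools/dist_train.sh configs/ocrnet/fcn_hr48_512x1024_40k_b16_rmi_cityscapes.py 8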
| 29.870968 | 61 | 0.515119 |
de1d3ef7f7f54d86a070275471fe7d1638450030
| 10,099 |
py
|
Python
|
management/views.py
|
eark-project/access_dipcreator
|
b1f9034d309dfdb676cbba92c9c4722bf576aba2
|
[
"MIT"
] | null | null | null |
management/views.py
|
eark-project/access_dipcreator
|
b1f9034d309dfdb676cbba92c9c4722bf576aba2
|
[
"MIT"
] | null | null | null |
management/views.py
|
eark-project/access_dipcreator
|
b1f9034d309dfdb676cbba92c9c4722bf576aba2
|
[
"MIT"
] | null | null | null |
import json
import os
from json import JSONDecodeError
from django.template import loader
from django.views.generic.detail import DetailView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from shutil import rmtree
from eatb.storage.directorypairtreestorage import VersionDirFormat
from earkweb.models import InformationPackage, Representation
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from taskbackend.taskutils import extract_and_remove_package, flower_is_running
from config.configuration import config_path_work, verify_certificate
import django_tables2 as tables
from django.utils.safestring import mark_safe
from django.shortcuts import render
from django_tables2 import RequestConfig
from uuid import uuid4
import logging
from submission.views import upload_step1
from util.djangoutils import get_user_api_token
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
@login_required
def index(request):
template = loader.get_template('management/index.html')
context = {
}
return HttpResponse(template.render(context=context, request=request))
@login_required
@csrf_exempt
def ip_detail_table(request):
logger.info("Updating ip table ...")
pkg_id = request.POST['pkg_id']
ip = InformationPackage.objects.get(pk=pkg_id)
logger.info("- version: %s" % ip.version)
context = {
"ip": ip,
"config_path_work": config_path_work
}
return render(request, 'management/iptable.html', context=context)
@login_required
def index(request):
template = loader.get_template('management/index.html')
context = {
}
return HttpResponse(template.render(context=context, request=request))
@login_required
def sip_detail(request, pk):
ip = InformationPackage.objects.get(pk=pk)
if not ip.uid:
context = {"ip": ip}
return render(request, 'management/checkout.html', context=context)
return upload_step1(request, pk)
def upload_aip(ip_work_dir, upload_path, f):
print("Upload file '%s' to working directory: %s" % (f, upload_path))
if not os.path.exists(upload_path):
os.makedirs(upload_path, exist_ok=True)
destination_file = os.path.join(upload_path, f.name)
with open(destination_file, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
destination.close()
if f.name.endswith(".tar"):
async_res = extract_and_remove_package.delay(destination_file, upload_path,
os.path.join(ip_work_dir, 'metadata/sip_creation.log'))
print("Package extraction task '%s' to extract package '%s' to working directory: %s" % (
async_res.id, f.name, upload_path))
@login_required
def delete(request, pk):
ip = InformationPackage.objects.get(pk=pk)
template = loader.get_template('management/deleted.html')
if ip.uid:
path = os.path.join(config_path_work, ip.uid)
if os.path.exists(path):
rmtree(path)
context = {
'uid': ip.uid,
}
ip.uid = ""
ip.work_dir = ""
ip.save()
return HttpResponse(template.render(context=context, request=request))
@login_required
def checkout(request, identifier):
ip = InformationPackage.objects.get(identifier=identifier)
uid = None
if not ip.uid:
uid = str(uuid4())
ip.uid = uid
ip.work_dir = os.path.join(config_path_work, uid)
template = loader.get_template('management/checkout_confirm.html')
from config.configuration import django_backend_service_host, django_backend_service_port
import requests
reset_aip = "reset_aip" in request.POST and request.POST["reset_aip"] == "on"
request_url = "/earkweb/api/ips/%s/checkout-working-copy/?reset=%s" % \
(identifier, str(reset_aip).lower())
user_api_token = get_user_api_token(request.user)
response = requests.post(request_url, headers={'Authorization': 'Token %s' % user_api_token},
verify=verify_certificate)
if response.status_code != 201:
err_msg = "An error occurred while trying do the checkout"
try:
json_err = json.loads(response.text)
err_msg = "%s: %s" % (err_msg, json_err["message"])
except JSONDecodeError:
pass
return render(request, 'earkweb/error.html', {
'header': 'Checkout error', 'message': err_msg
})
resp_json = json.loads(response.text)
context = {
'msg_checkout_confirm': resp_json['message'],
'identifier': identifier,
'uid': uid,
'ip': ip,
"jobid": resp_json["job_id"],
'flower_status': flower_is_running()
}
return HttpResponse(template.render(context=context, request=request))
class InformationPackageTable(tables.Table):
from django_tables2.utils import A
area = "management"
identifier = tables.LinkColumn('%s:storage_area' % area, kwargs={'section': area, 'identifier': A('identifier')},
verbose_name=_("Archived Information Package"),
attrs={'a': {'data-toggle': 'tooltip', 'title': _('PackageDirectory')}})
version = tables.Column(verbose_name='Version')
created = tables.DateTimeColumn(format="d.m.Y H:i:s", verbose_name=_("CreationDateTime"))
packagecol = tables.Column(verbose_name=_('WorkingCopy'))
package_name = tables.LinkColumn('%s:resubmit' % area, kwargs={'pk': A('pk')}, verbose_name=_("InternalLabel"),
attrs={'a': {'data-toggle': 'tooltip', 'title': _('PackageOverview')}})
edit = tables.LinkColumn('%s:ip_detail' % area, kwargs={'pk': A('pk')}, verbose_name=_('ChangeIt'))
class Meta:
model = InformationPackage
fields = ('package_name', 'packagecol', 'identifier', 'version', 'created', 'edit')
attrs = {'class': 'table table-striped table-bordered table-condensed'}
row_attrs = {'data-id': lambda record: record.pk}
@staticmethod
def render_version(value):
return VersionDirFormat % value
@staticmethod
def render_edit(value):
return mark_safe(value)
@staticmethod
def render_packagecol(value):
return mark_safe(value)
@staticmethod
def render_statusprocess(value):
if value == "Success":
return mark_safe(
'Success <span class="glyphicon glyphicon-ok-sign" aria-hidden="true" style="color:green"/>'
)
elif value == "Error":
return mark_safe(
'Error <span class="glyphicon glyphicon-exclamation-sign" aria-hidden="true" style="color:#91170A"/>'
)
elif value == "Warning":
return mark_safe(
'Warning <span class="glyphicon glyphicon-warning-sign" aria-hidden="true" style="color:#F6A50B"/>'
)
else:
return value
@login_required
@csrf_exempt
def informationpackages_overview(request):
area = "management"
areacode = "2"
filterword = request.POST['filterword'] if 'filterword' in request.POST.keys() else ""
sql_query = """
select ip.id as id, ip.work_dir as path, ip.uid as uid, ip.package_name as package_name,
CONCAT('<a href="/earkweb/management/modify/',ip.id,'/" data-toggle="tooltip" title="Metadaten ändern oder neue Version übertragen"><i class="glyphicon glyphicon-edit editcol"></i></a>') as edit,
CONCAT('<a href="/earkweb/management/working_area/management/',ip.uid,'/" data-toggle="tooltip" title="View working directory">',ip.uid,'</a><a href="/earkweb/management/delete/',ip.id,'/" data-toggle="tooltip" title="Remove working copy">', IF(uid IS NULL OR uid = '', '', '<i class="glyphicon glyphicon-trash editcol"></i>'), '</a>') as packagecol,
ip.identifier as identifier
from informationpackage as ip
where storage_dir != '' and not deleted > 0 and (ip.uid like '%%{0}%%' or ip.package_name like '%%{0}%%' or ip.identifier like '%%{0}%%')
order by ip.last_change desc;
""".format(filterword, areacode)
# user_id={0} and, request.user.pk
queryset = InformationPackage.objects.raw(sql_query)
table = InformationPackageTable(queryset)
RequestConfig(request, paginate={'per_page': 8}).configure(table)
context = {
'informationpackage': table,
}
if request.method == "POST":
return render(request, 'earkweb/ipstable.html', context=context)
else:
return render(request, '%s/overview.html' % area, {'informationpackage': table})
@login_required
def render_network(request):
template = loader.get_template('management/render_network.html')
context = {
}
return HttpResponse(template.render(context=context, request=request))
def upload_file(upload_path, f):
print("Upload file '%s' to working directory: %s" % (f.name, upload_path))
if not os.path.exists(upload_path):
os.makedirs(upload_path, exist_ok=True)
destination_file = os.path.join(upload_path, f.name)
with open(destination_file, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
destination.close()
class InformationPackageDetail(DetailView):
"""
Information Package Detail View
"""
model = InformationPackage
context_object_name = 'ip'
template_name = 'management/detail.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(InformationPackageDetail, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(InformationPackageDetail, self).get_context_data(**kwargs)
context['config_path_work'] = config_path_work
context['metadata'] = json.loads(self.object.basic_metadata)
distributions = Representation.objects.filter(ip_id=self.object.pk).values()
context['distributions'] = distributions
return context
| 38.253788 | 354 | 0.67373 |
cb2bb86ffa2a0427fc0b7635ab6c77d411cc3a01
| 6,559 |
py
|
Python
|
kafka/admin_client.py
|
duruyi/kafka-python
|
fb00adbd6c985df0780342c207598e01595cc9e2
|
[
"Apache-2.0"
] | 20 |
2015-07-17T08:37:05.000Z
|
2021-06-01T15:52:16.000Z
|
kafka/admin_client.py
|
duruyi/kafka-python
|
fb00adbd6c985df0780342c207598e01595cc9e2
|
[
"Apache-2.0"
] | 26 |
2015-07-09T20:16:23.000Z
|
2020-04-03T22:00:38.000Z
|
kafka/admin_client.py
|
duruyi/kafka-python
|
fb00adbd6c985df0780342c207598e01595cc9e2
|
[
"Apache-2.0"
] | 14 |
2016-07-01T21:54:00.000Z
|
2019-09-24T05:33:07.000Z
|
import collections
import time
from .errors import NodeNotReadyError
from .protocol.admin import CreateTopicsRequest, DeleteTopicsRequest, CreatePartitionsRequest
from .protocol.metadata import MetadataRequest
"""NewPartitionsInfo
Fields:
name (string): name of topic
count (int): the new partition count
broker_ids_matrix: list(list(brokerids))
the sizes of inner lists are the replica factor of current topic
the size of outer list is the increased partition num of current topic
"""
NewPartitionsInfo = collections.namedtuple(
'NewPartitionsInfo',
['name', 'count', 'broker_ids_matrix']
)
def convert_new_topic_request_format(new_topic):
return (
new_topic.name,
new_topic.num_partitions,
new_topic.replication_factor,
[
(partition_id,replicas)
for partition_id, replicas in new_topic.replica_assignments.items()
],
[
(config_key, config_value)
for config_key, config_value in new_topic.configs.items()
],
)
def convert_topic_partitions_requst_format(topic_partition):
return (
topic_partition.name,
(
topic_partition.count,
topic_partition.broker_ids_matrix
)
)
class NewTopic(object):
""" A class for new topic creation
Arguments:
name (string): name of the topic
num_partitions (int): number of partitions
or -1 if replica_assignment has been specified
replication_factor (int): replication factor or -1 if
replica assignment is specified
replica_assignment (dict of int: [int]): A mapping containing
partition id and replicas to assign to it.
topic_configs (dict of str: str): A mapping of config key
and value for the topic.
"""
def __init__(
self,
name,
num_partitions,
replication_factor,
replica_assignments=None,
configs=None,
):
self.name = name
self.configs = configs or {}
self.num_partitions = num_partitions
self.replication_factor = replication_factor
self.replica_assignments = replica_assignments or {}
def __str__(self):
return "<name>:{}, <num_partitions>:{}, <replication_factor>:{}" \
"<replica_assignments>:{}, <configs>:{}".format(
self.name,
self.num_partitions,
self.replication_factor,
self.replica_assignments,
self.configs,
)
class AdminClient(object):
"""
An api to send CreateTopic requests
"""
def __init__(self, client):
self.client = client
self.metadata_request = MetadataRequest[1]([])
self.topic_request = CreateTopicsRequest[0]
self.delete_topics_request = DeleteTopicsRequest[0]
self.create_partitions_request = CreatePartitionsRequest[0]
def _send_controller_request(self):
response = self._send(
self.client.least_loaded_node(),
self.metadata_request,
)
return response[0].controller_id
def _send(self, node, request):
future = self.client.send(node, request)
return self.client.poll(future=future)
def _send_request(self, request):
controller_id = self._send_controller_request()
while not self.client.ready(controller_id):
# poll until the connection to broker is ready, otherwise send()
# will fail with NodeNotReadyError
self.client.poll()
return self._send(controller_id, request)
def create_partitions(
self,
new_partitions_infos,
timeout,
validate_only,
):
""" Create partitions on topics
Arguments:
new_partitions_infos (list of NewPartitionsInfo): A list containing
infos on increasing partitions with following format
[
NewPartitionsInfo(
'name': String,
'count': Int,
'broker_ids_matrix':
[
[id1, id2, id3],
[id1, id3, id4],
...
]
),
...
]
especially, broker_ids_matrix is a matrix of broker ids. The row size is
the number of newly added partitions and the col size is the replication
factor of the topic
timeout (int): timeout in seconds
validate_only (Boolean): If true then validate the
request without actually increasing the number of
partitions
Returns:
CreatePartitionsResponse: response from the broker
Raises:
NodeNotReadyError: if controller is not ready
"""
        request = self.create_partitions_request(
            topic_partitions=[
                convert_topic_partitions_request_format(new_partitions_info)
                for new_partitions_info in new_partitions_infos
            ],
            timeout=timeout,
            validate_only=validate_only,
        )
return self._send_request(request)
def create_topics(
self,
topics,
timeout,
):
""" Create topics on the cluster
Arguments:
topics (list of NewTopic): A list containing new
topics to be created
timeout (int): timeout in seconds
Returns:
CreateTopicResponse: response from the broker
Raises:
NodeNotReadyError: if controller is not ready
"""
request = self.topic_request(
create_topic_requests=[
convert_new_topic_request_format(topic)
for topic in topics
],
timeout=timeout,
)
return self._send_request(request)
def delete_topics(self, topics, timeout):
""" Deletes topics on the cluster
Arguments:
topics (list of topic names): Topics to delete
timeout (int): The requested timeout for this operation
Raises:
NodeNotReadyError: if controller is not ready
"""
request = self.delete_topics_request(
topics=topics,
timeout=timeout,
)
return self._send_request(request)
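# A minimal usage sketch, assuming a broker is reachable on localhost:9092 and that this
# module lives inside the kafka-python package so the async KafkaClient can be imported
# (run e.g. via `python -m kafka.admin_client`); topic name and broker id are illustrative.
if __name__ == '__main__':
    from kafka.client_async import KafkaClient
    client = KafkaClient(bootstrap_servers='localhost:9092')
    admin = AdminClient(client)
    topic = NewTopic(name='example-topic', num_partitions=3, replication_factor=1)
    print(admin.create_topics([topic], timeout=30))
    # Grow the topic to 4 partitions, placing the new one on broker 1, validation only.
    new_parts = NewPartitionsInfo(name='example-topic', count=4, broker_ids_matrix=[[1]])
    print(admin.create_partitions([new_parts], timeout=30, validate_only=True))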
| 31.382775 | 93 | 0.587894 |
3f76b2169db0d95db05c2188f5c91daeec6e69fa
| 8,236 |
py
|
Python
|
main.py
|
qibin0506/CarND-Semantic-Segmentation
|
1249a18b38b068e224d360eafd8d371c585cd1fd
|
[
"MIT"
] | null | null | null |
main.py
|
qibin0506/CarND-Semantic-Segmentation
|
1249a18b38b068e224d360eafd8d371c585cd1fd
|
[
"MIT"
] | null | null | null |
main.py
|
qibin0506/CarND-Semantic-Segmentation
|
1249a18b38b068e224d360eafd8d371c585cd1fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
input_tensor = graph.get_tensor_by_name(vgg_input_tensor_name)
input_kp_tensor = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_tensor = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_tensor = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_tensor = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_tensor, input_kp_tensor, layer3_tensor, layer4_tensor, layer7_tensor
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
layer7_out = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, strides=[1, 1],
padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer7_transpose = tf.layers.conv2d_transpose(layer7_out, num_classes, 4, strides=[2, 2],
padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer4_out = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, strides=[1, 1],
padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer4_connect = tf.add(layer7_transpose, layer4_out)
layer4_transpose = tf.layers.conv2d_transpose(layer4_connect, num_classes, 4, strides=[2, 2],
padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer3_out = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, strides=[1, 1],
padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer3_connect = tf.add(layer4_transpose, layer3_out)
nn_last_layer = tf.layers.conv2d_transpose(layer3_connect, num_classes, 16, strides=[8, 8],
padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
return nn_last_layer
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
correct_label = tf.reshape(correct_label, (-1, num_classes))
cross_entropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=correct_label, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
sess.run(tf.global_variables_initializer())
for e in range(epochs):
print("start epoch {} / {}".format(e + 1, epochs))
for images, labels in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: images,
correct_label: labels,
keep_prob: 0.8,
learning_rate: 0.0008})
print("loss:{}".format(loss))
tests.test_train_nn(train_nn)
def run():
epochs = 50
batch_size = 10
num_classes = 2
image_shape = (160, 576) # KITTI dataset uses 160x576 images
data_dir = '/data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# TODO: Build NN using load_vgg, layers, and optimize function
correct_label = tf.placeholder(tf.int32, shape=[None, None, None, num_classes])
lr = tf.placeholder(tf.float32)
input_tensor, input_kp_tensor, layer3_tensor, layer4_tensor, layer7_tensor = load_vgg(sess, vgg_path)
last_layer = layers(layer3_tensor, layer4_tensor, layer7_tensor, num_classes)
logits, train_op, cross_entropy_loss = optimize(last_layer, correct_label, lr, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
cross_entropy_loss, input_tensor, correct_label, input_kp_tensor, lr)
# TODO: Save inference data using helper.save_inference_samples
# helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, input_kp_tensor, input_tensor)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
| 45.005464 | 146 | 0.697062 |
d5b7183f364b8a493af78eae5b55c73e13d1e592
| 208 |
py
|
Python
|
calculator/admin.py
|
dev-nislam2020/thecalculatorcafe
|
be5b17888006dccdd3d3311e6bc1a84904cca32e
|
[
"MIT"
] | null | null | null |
calculator/admin.py
|
dev-nislam2020/thecalculatorcafe
|
be5b17888006dccdd3d3311e6bc1a84904cca32e
|
[
"MIT"
] | null | null | null |
calculator/admin.py
|
dev-nislam2020/thecalculatorcafe
|
be5b17888006dccdd3d3311e6bc1a84904cca32e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Tag, Calculator, CalculatorInfo
# Register your models here.
admin.site.register(Tag)
admin.site.register(Calculator)
admin.site.register(CalculatorInfo)
| 26 | 51 | 0.822115 |
2cab6d1668d08a922abd2f5a3c7c6982473cbed6
| 1,335 |
py
|
Python
|
doge-training/resize-images.py
|
jb1361/memelon
|
98c42f63bbf6447110eb1d6a73da53e5c318ab69
|
[
"MIT"
] | 1 |
2021-04-29T20:14:44.000Z
|
2021-04-29T20:14:44.000Z
|
doge-training/resize-images.py
|
jb1361/memelon
|
98c42f63bbf6447110eb1d6a73da53e5c318ab69
|
[
"MIT"
] | null | null | null |
doge-training/resize-images.py
|
jb1361/memelon
|
98c42f63bbf6447110eb1d6a73da53e5c318ab69
|
[
"MIT"
] | 2 |
2021-02-14T02:20:32.000Z
|
2021-04-29T20:14:46.000Z
|
import os
from PIL import Image
import glob
train_path="E:/memelon/doge-training/doge-classification/train/doge"
uniform_size = (600, 600)
i = 0
def checkFileExists(head, filetype):
global i
if os.path.isfile(head + "/" + "resized_" + str(i) + filetype):
i = i + 20
checkFileExists(head, filetype)
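# Example of the naming scheme above (an assumption about the author's intent):
# if "resized_0.jpg" already exists in `head`, the counter jumps ahead by 20 and
# the check repeats until a free "resized_<i>" name is found, so previously
# resized output is never overwritten.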
for x in glob.glob(train_path + '/**/*.jpg', recursive=True):
img = Image.open(x)
img.thumbnail(uniform_size)
img = img.resize(uniform_size)
head, tail = os.path.split(x)
checkFileExists(head, ".jpg")
img.save(head + "/" + "resized_" + str(i) + ".jpg", optimize=True, quality=40)
os.remove(x)
i = i + 1
for x in glob.glob(train_path + '/**/*.com**', recursive=True):
img = Image.open(x)
img.thumbnail(uniform_size)
img = img.resize(uniform_size)
head, tail = os.path.split(x)
checkFileExists(head, ".png")
img.save(head + "/" + "resized_" + str(i) + ".png", optimize=True, quality=40)
os.remove(x)
i = i + 1
for x in glob.glob(train_path + '/**/*.png', recursive=True):
img = Image.open(x)
img.thumbnail(uniform_size)
img = img.resize(uniform_size)
head, tail = os.path.split(x)
checkFileExists(head, ".png")
img.save(head + "/" + "resized_" + str(i) + ".png", optimize=True, quality=40)
os.remove(x)
i = i + 1
| 29.666667 | 82 | 0.617978 |
e581a43549cc30a2600489732459ce50ffd58a4f
| 158 |
py
|
Python
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_ConstantTrend_BestCycle_ARX.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_ConstantTrend_BestCycle_ARX.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1 |
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_ConstantTrend_BestCycle_ARX.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['ConstantTrend'] , ['BestCycle'] , ['ARX'] );
| 39.5 | 80 | 0.746835 |
83e68ffd03e85affd4029c50ae8954c42ae68942
| 4,564 |
py
|
Python
|
benchmark/startQiskit_QC3143.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC3143.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC3143.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=47
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
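    # Worked example (illustration only): with n = 2 and f('11') = '1', the loop
    # below appends a plain multi-controlled Toffoli, so the target flips exactly
    # when both controls are |1>. For f('01') = '1' (rep = '01'), an X gate is
    # placed on controls[0] before and after the Toffoli so the flip happens for
    # the input state |01> instead.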
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=30
prog.cz(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=32
prog.x(input_qubit[2]) # number=28
prog.h(input_qubit[2]) # number=39
prog.cz(input_qubit[0],input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=41
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[2]) # number=36
prog.cz(input_qubit[3],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=38
prog.h(input_qubit[0]) # number=44
prog.cz(input_qubit[2],input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=46
prog.h(input_qubit[0]) # number=19
prog.cz(input_qubit[2],input_qubit[0]) # number=20
prog.h(input_qubit[0]) # number=21
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[2],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.x(input_qubit[2]) # number=42
prog.x(input_qubit[2]) # number=43
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC3143.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 35.379845 | 165 | 0.654689 |
435561c8e80edf26f57dc506c77ee0db124ac0ef
| 53,360 |
py
|
Python
|
salt/transport/zeromq.py
|
anitakrueger/salt
|
ffa430507041e18a783444fc379d67b078b5692f
|
[
"Apache-2.0"
] | 5 |
2018-05-01T20:51:14.000Z
|
2021-11-09T05:43:00.000Z
|
salt/transport/zeromq.py
|
anitakrueger/salt
|
ffa430507041e18a783444fc379d67b078b5692f
|
[
"Apache-2.0"
] | 4 |
2019-02-08T17:53:38.000Z
|
2019-06-06T16:17:27.000Z
|
salt/transport/zeromq.py
|
anitakrueger/salt
|
ffa430507041e18a783444fc379d67b078b5692f
|
[
"Apache-2.0"
] | 7 |
2017-09-29T18:49:53.000Z
|
2021-11-09T05:42:49.000Z
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import errno
import signal
import socket
import hashlib
import logging
import weakref
import threading
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.log.setup
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.zeromq
import salt.utils.versions
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError
from salt._compat import ipaddress
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip,
master_port,
source_ip=None,
source_port=None):
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
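    # Illustrative return values (hypothetical addresses, not taken from the docs):
    #   _get_master_uri('203.0.113.10', 4506)
    #     -> 'tcp://203.0.113.10:4506'
    #   _get_master_uri('203.0.113.10', 4506, source_ip='10.0.0.2', source_port=4507)
    #     -> 'tcp://10.0.0.2:4507;203.0.113.10:4506'  (needs libzmq >= 4.1.6, pyzmq >= 16.0.1)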
from salt.utils.zeromq import ip_bracket
master_uri = 'tcp://{master_ip}:{master_port}'.format(
master_ip=ip_bracket(master_ip), master_port=master_port)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip), source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_ip and not source_port:
master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_port and not source_ip:
ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::')
master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format(
ip_any=ip_any, source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
else:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
            log.warning('Specific source IP / port for connecting to master returner port: configuration ignored')
return master_uri
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncZeroMQReqChannel for %s', key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
id(loop_instance_map), key, os.getpid())
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug('Re-using AsyncZeroMQReqChannel for %s', key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop', '_refcount', '_refcount_lock'):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClientPool(result.opts,
args=(result.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop}))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != 'clear':
# we don't need to worry about auth as a kwarg, since its a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.opts['master_uri'])
self.message_client = AsyncReqMessageClientPool(self.opts,
args=(self.opts, self.opts['master_uri'],),
kwargs={'io_loop': self._io_loop})
self._closing = False
def close(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
'This is not the last %s instance. Not closing yet.',
self.__class__.__name__
)
return
log.debug('Closing %s instance', self.__class__.__name__)
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
@property
def master_uri(self):
if 'master_ip' in self.opts:
return _get_master_uri(self.opts['master_ip'],
self.opts['master_port'],
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_ret_port'))
return self.opts['master_uri']
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if 'key' not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret['key'],
RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
            # we may not always get data back; for example, a salt-call ret
            # submission is a blind communication -- we do not subscribe to
            # return events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # First attempt; on an authentication error, re-auth below and retry.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
raise tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
if self.opts.get('__role') == 'syndic':
self._socket.setsockopt(zmq.SUBSCRIBE, b'syndic')
else:
self._socket.setsockopt(
zmq.SUBSCRIBE,
salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b'')
self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'])
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts['recon_default'] + self.opts['recon_max']
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
def __del__(self):
self.close()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
# if this is changed from the default, we assume it was intentional
if int(self.opts.get('publish_port', 4505)) != 4505:
self.publish_port = self.opts.get('publish_port')
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds['publish_port']
log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return _get_master_uri(self.opts['master_ip'],
self.publish_port,
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_publish_port'))
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
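        # The two wire formats handled below (illustration only):
        #   [payload]          -> old style, no zmq filtering
        #   [topic, payload]   -> zmq_filtering enabled; the topic is 'broadcast',
        #                         'syndic', or this minion's hashed id (self.hexid)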
messages_len = len(messages)
        # if it was one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
        # 2 parts include a topic header which says which minion should handle it
elif messages_len == 2:
if (self.opts.get('__role') != 'syndic' and messages[0] not in ('broadcast', self.hexid)) or \
(self.opts.get('__role') == 'syndic' and messages[0] not in ('broadcast', 'syndic')):
log.debug('Publish received for not this minion: %s', messages[0])
raise tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin,
salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
self.__setup_signals()
salt.utils.process.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, '_monitor', None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, '_w_monitor', None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, '_socket') and self._socket.closed is False:
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
'''
Starts ZMQ monitor for debugging purposes.
:return:
'''
        # The socket monitor is only meant for debugging
        # purposes, so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
log.debug('Starting ZMQ monitor')
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
            log.debug('ZMQ monitor has been started')
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
        :param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket %s', self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
Handle incoming messages from underlying TCP streams
        :param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
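        # Rough shape of a deserialized request (values are illustrative only):
        #   {'enc': 'clear', 'load': {'cmd': '_auth', 'id': 'minion01', ...}}
        # or, for an already-authenticated minion:
        #   {'enc': 'aes', 'load': <encrypted blob handled by _decode_payload>}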
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc:
exc_type = type(exc).__name__
if exc_type == 'AuthenticationError':
log.debug(
'Minion failed to auth to master. Since the payload is '
'encrypted, it is not known which minion failed to '
'authenticate. It is likely that this is a transient '
'failure due to the master rotating its public key.'
)
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: %s and load was %s', payload, payload.get('load'))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.send(self.serial.dumps('Some exception handling minion payload'))
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.send(self.serial.dumps('Server-side exception handling payload'))
raise tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
    its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
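    # Example opts fragment read by the checks below (values are illustrative,
    # not Salt defaults):
    #   {'tcp_keepalive': 1, 'tcp_keepalive_idle': 300,
    #    'tcp_keepalive_cnt': -1, 'tcp_keepalive_intvl': -1}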
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
)
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on %s', pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug('Publish daemon getting data from puller %s', pull_uri)
package = pull_sock.recv()
log.debug('Publish daemon received payload. size=%d', len(package))
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
log.trace('Accepted unpacked package from puller')
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
log.trace('Sending filtered data over publisher %s', pub_uri)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(topic).hexdigest())
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent')
# Syndic broadcast
if self.opts.get('order_masters'):
log.trace('Sending filtered data to syndic')
pub_sock.send(b'syndic', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent to syndic')
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
pub_sock.send(b'broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri)
pub_sock.send(payload)
log.trace('Unfiltered data has been sent')
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
            log.trace('Publish daemon caught KeyboardInterrupt, tearing down')
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
'''
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
'''
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
'''
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
'''
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock')
def publish(self, load):
'''
Publish "load" to minions. This send the load to the publisher daemon
process with does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
'''
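        # Minimal example of a load this method expects (field values are
        # illustrative only):
        #   {'fun': 'test.ping', 'arg': [], 'tgt': '*', 'tgt_type': 'glob',
        #    'jid': '20190101000000000000'}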
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
            # Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
'Sending payload to publish daemon. jid=%s size=%d',
load.get('jid', None), len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug('Sent payload to publish daemon.')
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''
    Pool of AsyncReqMessageClient instances, used to avoid blocking while waiting to write data to the socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
self._closing = False
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
def __del__(self):
self.close()
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
'''
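    # Usage sketch (illustrative only, inside a tornado coroutine):
    #   client = AsyncReqMessageClient(opts, 'tcp://127.0.0.1:4506', io_loop=io_loop)
    #   reply = yield client.send({'cmd': 'ping'}, timeout=60, tries=3)
    # send() queues the serialized message and returns a Future that is resolved
    # by _internal_send_recv when the REP side answers, or failed with
    # SaltReqTimeoutError once all tries are exhausted.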
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
        :param str addr: The ZeroMQ URI of the master request server to connect to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'stream') and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
def __del__(self):
self.close()
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug('Trying to connect to: %s', self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
@tornado.gen.coroutine
def _internal_send_recv(self):
while self.send_queue:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
                # Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=W0702
log.debug('Re-init ZMQ socket: %s', err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
            # Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. (%s/%s)', future.attempts, future.tries)
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if not self.send_queue:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
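        # Example of what monitor_callback() below ends up logging (illustrative):
        #   {'event': zmq.EVENT_CONNECTED, 'description': 'EVENT_CONNECTED', ...}
        # The numeric 'event' value is mapped to its EVENT_* name via the
        # event_map property.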
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: %s", evt)
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
| 39.613957 | 119 | 0.589599 |
08a3e3fcc68495d87b83e9c80602463438c42fa3
| 6,332 |
py
|
Python
|
tests/providers/google/cloud/operators/test_mssql_to_gcs.py
|
ianburrell/airflow
|
3de68501b7a76dce24bfd8a8b4659eedcf7ac29c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 |
2015-08-25T13:56:44.000Z
|
2020-03-21T10:26:58.000Z
|
tests/providers/google/cloud/operators/test_mssql_to_gcs.py
|
ianburrell/airflow
|
3de68501b7a76dce24bfd8a8b4659eedcf7ac29c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 |
2016-04-15T12:31:24.000Z
|
2020-03-03T12:56:08.000Z
|
tests/providers/google/cloud/operators/test_mssql_to_gcs.py
|
santecapital/airflow
|
7f02e56c9cb8b548624d13e9c2c2b89d753f996b
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 |
2018-07-24T08:54:45.000Z
|
2018-08-31T13:41:50.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from airflow import PY38
if not PY38:
from airflow.providers.google.cloud.operators.mssql_to_gcs import MSSQLToGCSOperator
TASK_ID = 'test-mssql-to-gcs'
MSSQL_CONN_ID = 'mssql_conn_test'
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'
GZIP = False
ROWS = [
('mock_row_content_1', 42),
('mock_row_content_2', 43),
('mock_row_content_3', 44)
]
CURSOR_DESCRIPTION = (
('some_str', 0, None, None, None, None, None),
('some_num', 3, None, None, None, None, None)
)
NDJSON_LINES = [
b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
b'{"some_num": 44, "some_str": "mock_row_content_3"}\n'
]
SCHEMA_FILENAME = 'schema_test.json'
SCHEMA_JSON = [
b'[{"mode": "NULLABLE", "name": "some_str", "type": "STRING"}, ',
b'{"mode": "NULLABLE", "name": "some_num", "type": "INTEGER"}]'
]
@unittest.skipIf(PY38, "Mssql package not available when Python >= 3.8.")
class TestMsSqlToGoogleCloudStorageOperator(unittest.TestCase):
def test_init(self):
"""Test MySqlToGoogleCloudStorageOperator instance is properly initialized."""
op = MSSQLToGCSOperator(
task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
self.assertEqual(op.task_id, TASK_ID)
self.assertEqual(op.sql, SQL)
self.assertEqual(op.bucket, BUCKET)
self.assertEqual(op.filename, JSON_FILENAME)
@mock.patch('airflow.providers.google.cloud.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.providers.google.cloud.operators.sql_to_gcs.GCSHook')
def test_exec_success_json(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test successful run of execute function for JSON"""
op = MSSQLToGCSOperator(
task_id=TASK_ID,
mssql_conn_id=MSSQL_CONN_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME)
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual(JSON_FILENAME.format(0), obj)
self.assertEqual('application/json', mime_type)
self.assertEqual(GZIP, gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(NDJSON_LINES), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mssql_hook_mock_class.assert_called_once_with(mssql_conn_id=MSSQL_CONN_ID)
mssql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.providers.google.cloud.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.providers.google.cloud.operators.sql_to_gcs.GCSHook')
def test_file_splitting(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test that ndjson is split by approx_max_file_size_bytes param."""
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
expected_upload = {
JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
JSON_FILENAME.format(1): NDJSON_LINES[2],
}
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual('application/json', mime_type)
self.assertEqual(GZIP, gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(expected_upload[obj], file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MSSQLToGCSOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]))
op.execute(None)
@mock.patch('airflow.providers.google.cloud.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.providers.google.cloud.operators.sql_to_gcs.GCSHook')
def test_schema_file(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test writing schema files."""
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip): # pylint: disable=unused-argument
if obj == SCHEMA_FILENAME:
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(SCHEMA_JSON), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MSSQLToGCSOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
schema_filename=SCHEMA_FILENAME)
op.execute(None)
# once for the file and once for the schema
self.assertEqual(2, gcs_hook_mock.upload.call_count)
| 40.075949 | 106 | 0.689987 |
54bf5fced659879304e12f414d5838f48827cac0
| 1,087 |
py
|
Python
|
proteus/tests/ci/ladr_2d_n.py
|
burgreen/proteus
|
033bbd3fd0ff11d53d8e85b2da1af49e10af9c5d
|
[
"MIT"
] | null | null | null |
proteus/tests/ci/ladr_2d_n.py
|
burgreen/proteus
|
033bbd3fd0ff11d53d8e85b2da1af49e10af9c5d
|
[
"MIT"
] | null | null | null |
proteus/tests/ci/ladr_2d_n.py
|
burgreen/proteus
|
033bbd3fd0ff11d53d8e85b2da1af49e10af9c5d
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
from proteus import *
from proteus.default_n import *
try:
from .ladr_2d_p import *
except:
from ladr_2d_p import *
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_cfl_controller
runCFL=1.0
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
elementQuadrature = SimplexGaussQuadrature(nd,3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
subgridError = AdvectionDiffusionReaction_ASGS(coefficients,nd,lag=False)
shockCapturing = ResGradQuad_SC(coefficients,nd,
shockCapturingFactor=0.99,
lag=True)
numericalFluxType = Advection_DiagonalUpwind_Diffusion_SIPG_exterior
nnx=41; nny=41
tnList=[old_div(float(i),40.0) for i in range(11)]
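# The comprehension above yields 11 evenly spaced output times:
# [0.0, 0.025, 0.05, ..., 0.225, 0.25]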
matrix = SparseMatrix
multilevelLinearSolver = LU
linearSmoother = None
l_atol_res = 1.0e-8
parallelPartitioningType = MeshParallelPartitioningTypes.node
nLayersOfOverlapForParallel = 1
conservativeFlux = None
cfluxtag = None
| 32.939394 | 73 | 0.787489 |
63243699312f37df108d52d3782b6d37c0182123
| 102 |
py
|
Python
|
run.py
|
KennyChenFight/AIBox-Server
|
e2bb4a9afff1db7596741c4084889769510eb27b
|
[
"MIT"
] | 1 |
2018-10-18T14:48:09.000Z
|
2018-10-18T14:48:09.000Z
|
run.py
|
KennyChenFight/AIBox-Server
|
e2bb4a9afff1db7596741c4084889769510eb27b
|
[
"MIT"
] | null | null | null |
run.py
|
KennyChenFight/AIBox-Server
|
e2bb4a9afff1db7596741c4084889769510eb27b
|
[
"MIT"
] | 1 |
2018-09-22T08:18:14.000Z
|
2018-09-22T08:18:14.000Z
|
from app import app
if __name__ == '__main__':
app.run(host='192.168.100.105')
#app.run()
| 20.4 | 36 | 0.607843 |
6a95cf9f155a8e4a83e4fe68dba2488c5d3adfe1
| 395 |
py
|
Python
|
secret.py
|
Xuechunqiu/cgi-lab
|
e7d2325ad1b549296b99254c13fa24e260dc3c4f
|
[
"Apache-2.0"
] | null | null | null |
secret.py
|
Xuechunqiu/cgi-lab
|
e7d2325ad1b549296b99254c13fa24e260dc3c4f
|
[
"Apache-2.0"
] | null | null | null |
secret.py
|
Xuechunqiu/cgi-lab
|
e7d2325ad1b549296b99254c13fa24e260dc3c4f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import cgi
import cgitb
cgitb.enable()
class FollowingTheTAsInstructionsError(Exception):
def __init__(self):
Exception.__init__(self, (
"You must edit secret.py to change the username, password, "
"and to delete this error!"
))
# Edit the following two lines:
username = "admin"
password = "123456"
| 20.789474 | 72 | 0.648101 |
e5f2eaf9bd3cea737b9c1c6720cdb2166f3bdf10
| 18,301 |
py
|
Python
|
numpy/core/tests/test_half.py
|
mspacek/numpy
|
645b9f572f0a22e9049fd736b8b91427be2c8402
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/tests/test_half.py
|
mspacek/numpy
|
645b9f572f0a22e9049fd736b8b91427be2c8402
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/tests/test_half.py
|
mspacek/numpy
|
645b9f572f0a22e9049fd736b8b91427be2c8402
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, absolute_import, print_function
import platform
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \
dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf(TestCase):
def setUp(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00,0x7fff,-1, dtype=uint16),
np.arange(0x0000,0x7c01,1, dtype=uint16))
)
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048,2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=np.int)
assert_equal(i_int,j)
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_f16 = self.finite_f16
a_bits = a_f16.view(dtype=uint16)
# Convert to 64-bit float manually
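        # (Added note) the masks below assume the IEEE 754 binary16 layout:
        # 1 sign bit, 5 exponent bits with bias 15, and 10 fraction bits;
        # subnormals (stored exponent 0) use an effective exponent of -14 with
        # no implicit leading 1, which is what the two fix-ups below handle.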
a_sgn = (-1.0)**((a_bits&0x8000) >> 15)
a_exp = np.array((a_bits&0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits&0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp!=-15] += 1
# Denormalized exponent is -14
a_exp[a_exp==-15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
                            (a_bits[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
                            (a_bits[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0,0,-1,-1/1e20,0,2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2,5,6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2,5,6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a,b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i),i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0,1,2,4,2], dtype=float16)
b = np.array([-2,5,1,4,3], dtype=float16)
c = np.array([0,-1,-np.inf,np.nan,6], dtype=float16)
assert_equal(np.add(a,b), [-2,6,3,8,5])
assert_equal(np.subtract(a,b), [2,-4,1,0,-1])
assert_equal(np.multiply(a,b), [0,5,2,16,6])
assert_equal(np.divide(a,b), [0,0.199951171875,2,1,0.66650390625])
assert_equal(np.equal(a,b), [False,False,False,True,False])
assert_equal(np.not_equal(a,b), [True,True,True,False,True])
assert_equal(np.less(a,b), [False,True,False,False,True])
assert_equal(np.less_equal(a,b), [False,True,False,True,True])
assert_equal(np.greater(a,b), [True,False,True,False,False])
assert_equal(np.greater_equal(a,b), [True,False,True,True,False])
assert_equal(np.logical_and(a,b), [False,True,True,True,True])
assert_equal(np.logical_or(a,b), [True,True,True,True,True])
assert_equal(np.logical_xor(a,b), [True,False,False,False,False])
assert_equal(np.logical_not(a), [True,False,False,False,False])
assert_equal(np.isnan(c), [False,False,False,True,False])
assert_equal(np.isinf(c), [False,False,True,False,False])
assert_equal(np.isfinite(c), [True,True,False,False,True])
assert_equal(np.signbit(b), [True,False,False,False,False])
assert_equal(np.copysign(b,a), [2,5,1,4,3])
assert_equal(np.maximum(a,b), [0,5,2,4,3])
x = np.maximum(b,c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0,5,1,0,6])
assert_equal(np.minimum(a,b), [-2,1,1,4,2])
x = np.minimum(b,c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2,-1,-np.inf,0,3])
assert_equal(np.fmax(a,b), [0,5,2,4,3])
assert_equal(np.fmax(b,c), [0,5,1,4,6])
assert_equal(np.fmin(a,b), [-2,1,1,4,2])
assert_equal(np.fmin(b,c), [-2,-1,-np.inf,4,3])
assert_equal(np.floor_divide(a,b), [0,0,2,1,0])
assert_equal(np.remainder(a,b), [0,1,0,0,2])
assert_equal(np.square(b), [4,25,1,16,9])
assert_equal(np.reciprocal(b), [-0.5,0.199951171875,1,0.25,0.333251953125])
assert_equal(np.ones_like(b), [1,1,1,1,1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2,5,1,4,3])
assert_equal(np.negative(b), [2,-5,-1,-4,-3])
assert_equal(np.sign(b), [-1,1,1,1,1])
assert_equal(np.modf(b), ([0,0,0,0,0],b))
assert_equal(np.frexp(b), ([-0.5,0.625,0.5,0.5,0.75],[2,3,1,3,2]))
assert_equal(np.ldexp(b,[0,1,2,4,2]), [-2,10,4,64,12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,),dtype=float16)
a32 = np.array((1,),dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert_equal(np.power(a16,2).dtype, float16)
assert_equal(np.power(a16,2.0).dtype, float16)
assert_equal(np.power(a16,b16).dtype, float16)
assert_equal(np.power(a16,b32).dtype, float16)
assert_equal(np.power(a16,a16).dtype, float16)
assert_equal(np.power(a16,a32).dtype, float32)
assert_equal(np.power(b16,2).dtype, float64)
assert_equal(np.power(b16,2.0).dtype, float64)
assert_equal(np.power(b16,b16).dtype, float16)
assert_equal(np.power(b16,b32).dtype, float32)
assert_equal(np.power(b16,a16).dtype, float16)
assert_equal(np.power(b16,a32).dtype, float32)
assert_equal(np.power(a32,a16).dtype, float32)
assert_equal(np.power(a32,b16).dtype, float32)
assert_equal(np.power(b32,a16).dtype, float16)
assert_equal(np.power(b32,b16).dtype, float32)
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,),dtype=float16)
bx16 = np.array((1e4,),dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
# Underflow errors
assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sx16)
assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sy16)
assert_raises_fpe('underflow', lambda a,b:a*b, sy16, sx16)
assert_raises_fpe('underflow', lambda a,b:a*b, sy16, sy16)
assert_raises_fpe('underflow', lambda a,b:a/b, sx16, bx16)
assert_raises_fpe('underflow', lambda a,b:a/b, sx16, by16)
assert_raises_fpe('underflow', lambda a,b:a/b, sy16, bx16)
assert_raises_fpe('underflow', lambda a,b:a/b, sy16, by16)
assert_raises_fpe('underflow', lambda a,b:a/b,
float16(2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a,b:a/b,
float16(-2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a,b:a/b,
float16(2.**-14+2**-24), float16(2))
assert_raises_fpe('underflow', lambda a,b:a/b,
float16(-2.**-14-2**-24), float16(2))
assert_raises_fpe('underflow', lambda a,b:a/b,
float16(2.**-14+2**-23), float16(4))
# Overflow errors
assert_raises_fpe('overflow', lambda a,b:a*b, bx16, bx16)
assert_raises_fpe('overflow', lambda a,b:a*b, bx16, by16)
assert_raises_fpe('overflow', lambda a,b:a*b, by16, bx16)
assert_raises_fpe('overflow', lambda a,b:a*b, by16, by16)
assert_raises_fpe('overflow', lambda a,b:a/b, bx16, sx16)
assert_raises_fpe('overflow', lambda a,b:a/b, bx16, sy16)
assert_raises_fpe('overflow', lambda a,b:a/b, by16, sx16)
assert_raises_fpe('overflow', lambda a,b:a/b, by16, sy16)
assert_raises_fpe('overflow', lambda a,b:a+b,
float16(65504), float16(17))
assert_raises_fpe('overflow', lambda a,b:a-b,
float16(-65504), float16(17))
assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
assert_raises_fpe('overflow', np.spacing, float16(65504))
# Invalid value errors
assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.nan))
assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0))
assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0))
assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan))
# These should not raise
float16(65472)+float16(32)
float16(2**-13)/float16(2)
float16(2**-14)/float16(2**10)
np.spacing(float16(-65504))
np.nextafter(float16(65504), float16(-np.inf))
np.nextafter(float16(-65504), float16(np.inf))
float16(2**-14)/float16(2**10)
float16(-2**-14)/float16(2**10)
float16(2**-14+2**-23)/float16(2)
float16(-2**-14-2**-23)/float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
if __name__ == "__main__":
run_module_suite()
| 41.593182 | 90 | 0.54926 |
17290d747359ba00ff8e98126d73fe5d234e36b1
| 9,690 |
py
|
Python
|
SelectWindowSize/run_LogisticRegression.py
|
tufts-ml/fNIRS-mental-workload-classifiers
|
b5199d6184e659152d1fe650db48eba53a221186
|
[
"MIT"
] | 4 |
2021-12-22T12:04:29.000Z
|
2022-03-23T20:02:21.000Z
|
SelectWindowSize/run_LogisticRegression.py
|
tufts-ml/fNIRS-mental-workload-classifiers
|
b5199d6184e659152d1fe650db48eba53a221186
|
[
"MIT"
] | null | null | null |
SelectWindowSize/run_LogisticRegression.py
|
tufts-ml/fNIRS-mental-workload-classifiers
|
b5199d6184e659152d1fe650db48eba53a221186
|
[
"MIT"
] | 4 |
2021-12-29T09:02:20.000Z
|
2022-02-24T22:15:40.000Z
|
#NOTE: run this script with the bpf data, use 5050 paradigm
import os
import sys
import numpy as np
import argparse
from easydict import EasyDict as edict
from tqdm import trange
# from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
YOUR_PATH = os.environ['YOUR_PATH']
sys.path.insert(0, os.path.join(YOUR_PATH, 'fNIRS-mental-workload-classifiers/helpers'))
import models
import brain_data
from utils import seed_everything, featurize, makedir_if_not_exist, plot_confusion_matrix, save_pickle, write_performance_info_FixedTrainValSplit
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--data_dir', default='../data/bpf_Leon/Visual/size_2sec_10ts_stride_3ts/', help='folder to the train data')
parser.add_argument('--SelectWindowSize_testset_dir', default='../data/bpf_UsedForSelectingWindowSize/Visual/size_2sec_10ts_stride_3ts', help='folder to the test data')
parser.add_argument('--window_size', default=200, type=int, help='window size')
parser.add_argument('--result_save_rootdir', default='./experiments', help='folder to the result')
parser.add_argument('--SubjectId_of_interest', default='1', help='which subject of interest')
parser.add_argument('--classification_task', default='four_class', help='binary or four-class classification')
def train_classifier(args_dict):
#parse args:
data_dir = args_dict.data_dir
SelectWindowSize_testset_dir = args_dict.SelectWindowSize_testset_dir
window_size = args_dict.window_size
result_save_rootdir = args_dict.result_save_rootdir
SubjectId_of_interest = args_dict.SubjectId_of_interest
classification_task = args_dict.classification_task
#load this subject's data
sub_file = 'sub_{}.csv'.format(SubjectId_of_interest)
if window_size == 10:
num_chunk_this_window_size = 2224
elif window_size == 25:
num_chunk_this_window_size = 2144
elif window_size == 50:
num_chunk_this_window_size = 2016
elif window_size == 100:
num_chunk_this_window_size = 1744
elif window_size == 150:
num_chunk_this_window_size = 1488
elif window_size == 200:
num_chunk_this_window_size = 1216
else:
raise NameError('not supported window size')
if classification_task == 'binary':
data_loading_function = brain_data.read_subject_csv_binary
data_loading_function_testset = brain_data.read_subject_csv_binary_SelectWindowSize
confusion_matrix_figure_labels = ['0back', '2back']
# elif classification_task == 'four_class':
# data_loading_function = brain_data.read_subject_csv
# data_loading_function_testset = brain_data.read_subject_csv_SelectWindowSize
# confusion_matrix_figure_labels = ['0back', '1back', '2back', '3back']
else:
raise NameError('not supported classification type')
#load the subject's data
sub_feature_array, sub_label_array = data_loading_function(os.path.join(data_dir, sub_file), num_chunk_this_window_size=num_chunk_this_window_size)
#load the test data from bpf_UsedForSelectWindowSize folder
sub_test_feature_array, sub_test_label_array = data_loading_function_testset(os.path.join(SelectWindowSize_testset_dir, sub_file))
sub_data_len = len(sub_label_array)
#use the 1st half as train
half_sub_data_len = int(sub_data_len/2)
sub_train_feature_array = sub_feature_array[:half_sub_data_len]
sub_train_label_array = sub_label_array[:half_sub_data_len]
transformed_sub_train_feature_array = featurize(sub_train_feature_array, classification_task)
transformed_sub_test_feature_array = featurize(sub_test_feature_array, classification_task)
#cross validation
Cs = np.logspace(-5,5,11)
for C in Cs:
experiment_name = 'C{}'.format(C)
#derived args
result_save_subjectdir = os.path.join(result_save_rootdir, SubjectId_of_interest, experiment_name)
result_save_subject_checkpointdir = os.path.join(result_save_subjectdir, 'checkpoint')
result_save_subject_predictionsdir = os.path.join(result_save_subjectdir, 'predictions')
result_save_subject_resultanalysisdir = os.path.join(result_save_subjectdir, 'result_analysis')
result_save_subject_trainingcurvedir = os.path.join(result_save_subjectdir, 'trainingcurve')
makedir_if_not_exist(result_save_subjectdir)
makedir_if_not_exist(result_save_subject_checkpointdir)
makedir_if_not_exist(result_save_subject_predictionsdir)
makedir_if_not_exist(result_save_subject_resultanalysisdir)
makedir_if_not_exist(result_save_subject_trainingcurvedir)
result_save_dict = dict()
if classification_task == 'binary':
if window_size == 200:
total_number_train_chunks = 304
total_index = np.arange(total_number_train_chunks)
train_index = total_index[:152]
val_index = total_index[152:]
elif window_size == 150:
total_number_train_chunks = 368
total_index = np.arange(total_number_train_chunks)
train_index = total_index[:184]
val_index = total_index[184:]
elif window_size == 100:
total_number_train_chunks = 436
total_index = np.arange(total_number_train_chunks)
train_index = total_index[:218]
val_index = total_index[218:]
elif window_size == 50:
total_number_train_chunks = 504
total_index = np.arange(total_number_train_chunks)
train_index = total_index[:252]
val_index = total_index[252:]
elif window_size == 25:
total_number_train_chunks = 536
total_index = np.arange(total_number_train_chunks)
train_index = total_index[:268]
val_index = total_index[268:]
elif window_size == 10:
total_number_train_chunks = 556
total_index = np.arange(total_number_train_chunks)
train_index = total_index[:278]
val_index = total_index[278:]
else:
raise NameError('not supported window size')
else:
raise NameError('not implemented classification task')
#only do 1 fold cross validation:
#dataset object
sub_cv_train_feature_array = transformed_sub_train_feature_array[train_index]
sub_cv_train_label_array = sub_train_label_array[train_index]
sub_cv_val_feature_array = transformed_sub_train_feature_array[val_index]
sub_cv_val_label_array = sub_train_label_array[val_index]
#create Logistic Regression object
model = LogisticRegression(C=C, random_state=0, max_iter=5000, solver='lbfgs').fit(sub_cv_train_feature_array, sub_cv_train_label_array)
# val performance
val_accuracy = model.score(sub_cv_val_feature_array, sub_cv_val_label_array) * 100
result_save_dict['bestepoch_val_accuracy'] = val_accuracy
# test performance
test_accuracy = model.score(transformed_sub_test_feature_array, sub_test_label_array) * 100
test_logits = model.predict_proba(transformed_sub_test_feature_array)
test_class_predictions = test_logits.argmax(1)
result_save_dict['bestepoch_test_accuracy'] = test_accuracy
result_save_dict['bestepoch_test_logits'] = test_logits.copy()
result_save_dict['bestepoch_test_class_labels'] = sub_test_label_array.copy()
plot_confusion_matrix(test_class_predictions, sub_test_label_array, confusion_matrix_figure_labels, result_save_subject_resultanalysisdir, 'test_confusion_matrix.png')
save_pickle(result_save_subject_predictionsdir, 'result_save_dict.pkl', result_save_dict)
#write performance to txt file
write_performance_info_FixedTrainValSplit('NA', result_save_subject_resultanalysisdir, val_accuracy, test_accuracy)
if __name__=='__main__':
#parse args
args = parser.parse_args()
seed = args.seed
data_dir = args.data_dir
SelectWindowSize_testset_dir = args.SelectWindowSize_testset_dir
window_size = args.window_size
result_save_rootdir = args.result_save_rootdir
SubjectId_of_interest = args.SubjectId_of_interest
classification_task = args.classification_task
#sanity check
print('type(data_dir): {}'.format(type(data_dir)))
print('type(SelectWindowSize_testset_dir): {}'.format(type(SelectWindowSize_testset_dir)))
print('type(window_size): {}'.format(type(window_size)))
print('type(SubjectId_of_interest): {}'.format(type(SubjectId_of_interest)))
print('type(result_save_rootdir): {}'.format(type(result_save_rootdir)))
print('type(classification_task): {}'.format(type(classification_task)))
args_dict = edict()
args_dict.data_dir = data_dir
args_dict.SelectWindowSize_testset_dir = SelectWindowSize_testset_dir
args_dict.window_size = window_size
args_dict.result_save_rootdir = result_save_rootdir
args_dict.SubjectId_of_interest = SubjectId_of_interest
args_dict.classification_task = classification_task
seed_everything(seed)
train_classifier(args_dict)
| 41.234043 | 175 | 0.709804 |
e355efaf0f7227ed490abb3e81e3f509a8f91363
| 548 |
py
|
Python
|
LeetCode/599_minimum_index_sum_of_two_lists/findRestaurant.py
|
harveyc95/ProgrammingProblems
|
d81dc58de0347fa155f5e25f27d3d426ce13cdc6
|
[
"MIT"
] | null | null | null |
LeetCode/599_minimum_index_sum_of_two_lists/findRestaurant.py
|
harveyc95/ProgrammingProblems
|
d81dc58de0347fa155f5e25f27d3d426ce13cdc6
|
[
"MIT"
] | null | null | null |
LeetCode/599_minimum_index_sum_of_two_lists/findRestaurant.py
|
harveyc95/ProgrammingProblems
|
d81dc58de0347fa155f5e25f27d3d426ce13cdc6
|
[
"MIT"
] | null | null | null |
class Solution(object):
def findRestaurant(self, list1, list2):
dict1 = {rest:idx for idx, rest in enumerate(list1)}
        print(dict1)
ans = []
minSum = 3000
for idx, rest in enumerate(list2):
if rest in dict1:
curSum = dict1[rest] + idx
if curSum == minSum:
ans.append(rest)
elif curSum < minSum:
del ans[:]
ans.append(rest)
minSum = curSum
return ans
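# Example (added for illustration):
#   list1 = ["Shogun", "Tapioca Express", "Burger King", "KFC"]
#   list2 = ["KFC", "Shogun", "Burger King"]
# "Shogun" has the minimum index sum (0 + 1 = 1), so the answer is ["Shogun"].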
| 30.444444 | 60 | 0.458029 |
1bcb58b6efe0786aacd88cf0134c4fef6bf14f19
| 4,809 |
py
|
Python
|
gen.py
|
AgamChopra/Unsupervised-image2image-shape-deformation-
|
36a600a016374d3d2a6e5b19053469b57c1faa06
|
[
"MIT"
] | 1 |
2021-06-10T12:47:57.000Z
|
2021-06-10T12:47:57.000Z
|
gen.py
|
AgamChopra/Unsupervised-image2image-shape-deformation
|
36a600a016374d3d2a6e5b19053469b57c1faa06
|
[
"MIT"
] | null | null | null |
gen.py
|
AgamChopra/Unsupervised-image2image-shape-deformation
|
36a600a016374d3d2a6e5b19053469b57c1faa06
|
[
"MIT"
] | null | null | null |
# generator
import torch.nn as nn
import torch
def E2L(ic,hc,oc,k,s):
out = nn.Sequential(nn.Conv2d(in_channels=ic, out_channels=hc, kernel_size=1, bias=False),
nn.BatchNorm2d(hc),nn.ReLU(),
nn.Conv2d(in_channels=hc, out_channels=oc, kernel_size=1, bias=False),
nn.BatchNorm2d(oc),nn.ReLU(),
nn.MaxPool2d(kernel_size=k,stride=s))
return out
def D2L(ic,hc,oc,k,s):
out = nn.Sequential(nn.ConvTranspose2d(in_channels=ic, out_channels=ic, kernel_size=k, stride=s, bias=False),
nn.BatchNorm2d(ic),nn.ReLU(),
nn.Conv2d(in_channels=ic, out_channels=hc, kernel_size=1, bias=False),
nn.BatchNorm2d(hc),nn.ReLU(),
nn.Conv2d(in_channels=hc, out_channels=oc, kernel_size=1, bias=False),
nn.BatchNorm2d(oc),nn.ReLU())
return out
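# (Added note) E2L builds an encoder stage: two 1x1 conv + BN + ReLU blocks
# followed by max-pooling; D2L builds the matching decoder stage: a transposed
# conv for upsampling followed by two 1x1 conv + BN + ReLU blocks.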
class generator(nn.Module):
def __init__(self):
super(generator,self).__init__()
self.f1 = E2L(3,4,6,3,1)
self.f2 = E2L(6,18,24,3,1)
self.f3 = E2L(24,40,48,3,1)
self.f4 = E2L(48,80,96,3,1)
self.f5 = E2L(96,160,192,3,1)
self.f6 = D2L(192,160,96,3,1)
self.f7 = D2L(192,80,48,3,1)
self.f8 = D2L(96,40,24,3,1)
self.f9 = D2L(48,18,6,3,1)
self.f10 = D2L(12,6,3,3,1)
self.f11 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1)
def forward(self, x):
# encoder
y1 = self.f1(x)
y2 = self.f2(y1)
y3 = self.f3(y2)
y4 = self.f4(y3)
y = self.f5(y4)
# decoder
#print(y.shape)
y = self.f6(y)
#print(y.shape)
#print(y4.shape)
y = self.f7(torch.cat([y,y4], dim=1))
#print(y.shape)
#print(y3.shape)
y = self.f8(torch.cat([y,y3], dim=1))
#print(y.shape)
#print(y2.shape)
y = self.f9(torch.cat([y,y2], dim=1))
#print(y.shape)
#print(y1.shape)
y = self.f10(torch.cat([y,y1], dim=1))
#print(y.shape)
y = self.f11(y)
#print(y.shape)
return y
class generator_dense(nn.Module):
def __init__(self):
super(generator_dense,self).__init__()
self.pad = nn.ZeroPad2d(1)
#first layer
self.f1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5,stride=1)
#recursive layer
self.f2 = nn.Sequential(nn.Conv2d(in_channels=6, out_channels=12, kernel_size=3,stride=1),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1))
#A
self.a1 = nn.Sequential(nn.BatchNorm2d(6),nn.ReLU(),nn.Conv2d(in_channels=6, out_channels=12, kernel_size=3,stride=1))
self.a2 = nn.Sequential(nn.BatchNorm2d(18),nn.ReLU(),nn.Conv2d(in_channels=18, out_channels=12, kernel_size=3,stride=1))
self.a3 = nn.Sequential(nn.BatchNorm2d(30),nn.ReLU(),nn.Conv2d(in_channels=30, out_channels=12, kernel_size=3,stride=1))
self.a4 = nn.Sequential(nn.BatchNorm2d(42),nn.ReLU(),nn.Conv2d(in_channels=42, out_channels=6, kernel_size=1,stride=1),
nn.BatchNorm2d(6),nn.ReLU())
#ls
self.fls = nn.Sequential(nn.BatchNorm2d(12),nn.ReLU())
#recursivelayers
self.f3 = nn.Sequential(nn.ConvTranspose2d(in_channels=12, out_channels=3, kernel_size=63),
nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1,stride=1))#concat A
#last layer concat out of 2nd A with f1
self.f4 = nn.Sequential(nn.ConvTranspose2d(in_channels=12, out_channels=6, kernel_size=3),
nn.Conv2d(in_channels=6, out_channels=3, kernel_size=1,stride=1, padding=1),
nn.BatchNorm2d(3),nn.Sigmoid())
self.ft = nn.Conv2d(6, 3, 1)
def forward(self, x):
#f1
y1 = self.f1(x)
#A
y = self.a1(y1)
c = torch.cat([y1,self.pad(y)],1)
y = self.a2(c)
c = torch.cat([c,self.pad(y)],1)
y = self.a3(c)
c = torch.cat([c,self.pad(y)],1)
y2 = self.a4(c)
#f2
y = self.f2(y2)
y = self.fls(y)
#f3
y = self.f3(y)
#A
y2t = self.ft(y2)
x = torch.cat([y,y2t],1)
y = self.a1(x)
c = torch.cat([x,self.pad(y)],1)
y = self.a2(c)
c = torch.cat([c,self.pad(y)],1)
y = self.a3(c)
c = torch.cat([c,self.pad(y)],1)
y = self.a4(c)
#f4
y = torch.cat([y,y1],1)
y = self.f4(y)
return y
| 38.166667 | 129 | 0.520898 |
069386d2c1467ed9047bd1898047a78ccb1652ee
| 11,543 |
py
|
Python
|
hydra/_internal/grammar/grammar_functions.py
|
evdcush/hydra
|
5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47
|
[
"MIT"
] | 1 |
2020-09-25T07:12:14.000Z
|
2020-09-25T07:12:14.000Z
|
hydra/_internal/grammar/grammar_functions.py
|
evdcush/hydra
|
5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47
|
[
"MIT"
] | 7 |
2021-06-28T20:30:25.000Z
|
2022-02-27T10:27:47.000Z
|
hydra/_internal/grammar/grammar_functions.py
|
evdcush/hydra
|
5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47
|
[
"MIT"
] | 1 |
2020-10-10T21:40:08.000Z
|
2020-10-10T21:40:08.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import builtins
import random
from copy import copy
from typing import Any, Callable, Dict, List, Optional, Union
from hydra._internal.grammar.utils import is_type_matching
from hydra.core.override_parser.types import (
ChoiceSweep,
Glob,
IntervalSweep,
ParsedElementType,
QuotedString,
RangeSweep,
Sweep,
)
ElementType = Union[str, int, bool, float, list, dict]
def apply_to_dict_values(
# val
value: Dict[Any, Any],
# func
function: Callable[..., Any],
) -> Dict[Any, Any]:
ret_dict: Dict[str, Any] = {}
for key, value in value.items():
ret_dict[key] = function(value)
return ret_dict
def cast_choice(value: ChoiceSweep, function: Callable[..., Any]) -> ChoiceSweep:
choices = []
for item in value.list:
choice = function(item)
assert is_type_matching(choice, ElementType)
choices.append(choice)
return ChoiceSweep(simple_form=value.simple_form, list=choices)
def cast_interval(value: IntervalSweep, function: Callable[..., Any]) -> IntervalSweep:
return IntervalSweep(
start=function(value.start), end=function(value.end), tags=copy(value.tags)
)
def cast_range(value: RangeSweep, function: Callable[..., Any]) -> RangeSweep:
if function not in (cast_float, cast_int):
raise ValueError("Range can only be cast to int or float")
return RangeSweep(
start=function(value.start),
stop=function(value.stop),
step=function(value.step),
)
CastType = Union[ParsedElementType, Sweep]
def _list_to_simple_choice(*args: Any) -> ChoiceSweep:
choices: List[ParsedElementType] = []
for arg in args:
assert is_type_matching(arg, ParsedElementType)
choices.append(arg)
return ChoiceSweep(list=builtins.list(choices), simple_form=True)
def _normalize_cast_value(*args: CastType, value: Optional[CastType]) -> CastType:
if len(args) > 0 and value is not None:
raise TypeError("cannot use both position and named arguments")
if value is not None:
return value
if len(args) == 0:
raise TypeError("No positional args or value specified")
if len(args) == 1:
return args[0]
if len(args) > 1:
return _list_to_simple_choice(*args)
assert False
def cast_int(*args: CastType, value: Optional[CastType] = None) -> Any:
value = _normalize_cast_value(*args, value=value)
if isinstance(value, QuotedString):
return cast_int(value.text)
if isinstance(value, dict):
return apply_to_dict_values(value, cast_int)
if isinstance(value, list):
return list(map(cast_int, value))
elif isinstance(value, ChoiceSweep):
return cast_choice(value, cast_int)
elif isinstance(value, RangeSweep):
return cast_range(value, cast_int)
elif isinstance(value, IntervalSweep):
return cast_interval(value, cast_int)
assert isinstance(value, (int, float, bool, str))
return int(value)
def cast_float(*args: CastType, value: Optional[CastType] = None) -> Any:
value = _normalize_cast_value(*args, value=value)
if isinstance(value, QuotedString):
return cast_float(value.text)
if isinstance(value, dict):
return apply_to_dict_values(value, cast_float)
if isinstance(value, list):
return list(map(cast_float, value))
elif isinstance(value, ChoiceSweep):
return cast_choice(value, cast_float)
elif isinstance(value, RangeSweep):
return cast_range(value, cast_float)
elif isinstance(value, IntervalSweep):
return cast_interval(value, cast_float)
assert isinstance(value, (int, float, bool, str))
return float(value)
def cast_str(*args: CastType, value: Optional[CastType] = None) -> Any:
value = _normalize_cast_value(*args, value=value)
if isinstance(value, QuotedString):
return cast_str(value.text)
if isinstance(value, dict):
return apply_to_dict_values(value, cast_str)
if isinstance(value, list):
return list(map(cast_str, value))
elif isinstance(value, ChoiceSweep):
return cast_choice(value, cast_str)
elif isinstance(value, RangeSweep):
return cast_range(value, cast_str)
elif isinstance(value, IntervalSweep):
raise ValueError("Intervals cannot be cast to str")
assert isinstance(value, (int, float, bool, str))
if isinstance(value, bool):
return str(value).lower()
else:
return str(value)
def cast_bool(*args: CastType, value: Optional[CastType] = None) -> Any:
value = _normalize_cast_value(*args, value=value)
if isinstance(value, QuotedString):
return cast_bool(value.text)
if isinstance(value, dict):
return apply_to_dict_values(value, cast_bool)
if isinstance(value, list):
return list(map(cast_bool, value))
elif isinstance(value, ChoiceSweep):
return cast_choice(value, cast_bool)
elif isinstance(value, RangeSweep):
return cast_range(value, cast_bool)
elif isinstance(value, IntervalSweep):
raise ValueError("Intervals cannot be cast to bool")
if isinstance(value, str):
if value.lower() == "false":
return False
elif value.lower() == "true":
return True
else:
raise ValueError(f"Cannot cast '{value}' to bool")
return bool(value)
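# Usage sketch for the cast helpers above (added for illustration only; the
# values follow directly from the branches implemented above):
#   cast_int("10")        -> 10
#   cast_int([1.5, "2"])  -> [1, 2]
#   cast_str(True)        -> "true"   (bools are deliberately lowercased)
#   cast_bool("False")    -> False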
def choice(
*args: Union[str, int, float, bool, Dict[Any, Any], List[Any], ChoiceSweep]
) -> ChoiceSweep:
"""
A choice sweep over the specified values
"""
if len(args) == 0:
raise ValueError("empty choice is not legal")
if len(args) == 1:
first = args[0]
if isinstance(first, ChoiceSweep):
if first.simple_form:
first.simple_form = False
return first
else:
raise ValueError("nesting choices is not supported")
return ChoiceSweep(list=list(args)) # type: ignore
def range(
start: Union[int, float], stop: Union[int, float], step: Union[int, float] = 1
) -> RangeSweep:
"""
    Range defines a sweep over a range of integer or floating-point values.
For a positive step, the contents of a range r are determined by the formula
r[i] = start + step*i where i >= 0 and r[i] < stop.
For a negative step, the contents of the range are still determined by the formula
r[i] = start + step*i, but the constraints are i >= 0 and r[i] > stop.
"""
return RangeSweep(start=start, stop=stop, step=step)
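# Illustrative note (added; not part of the original module). Applying the
# docstring formula r[i] = start + step*i:
#   range(1, 10, 2)  sweeps 1, 3, 5, 7, 9   (positive step, r[i] < stop)
#   range(5, 0, -2)  sweeps 5, 3, 1         (negative step, r[i] > stop)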
def interval(start: Union[int, float], end: Union[int, float]) -> IntervalSweep:
"""
A continuous interval between two floating point values.
value=interval(x,y) is interpreted as x <= value < y
"""
return IntervalSweep(start=float(start), end=float(end))
def tag(*args: Union[str, Union[Sweep]], sweep: Optional[Sweep] = None) -> Sweep:
"""
Tags the sweep with a list of string tags.
"""
if len(args) < 1:
raise ValueError("Not enough arguments to tag, must take at least a sweep")
if sweep is not None:
return tag(*(list(args) + [sweep]))
last = args[-1]
if isinstance(last, Sweep):
sweep = last
tags = set()
for tag_ in args[0:-1]:
if not isinstance(tag_, str):
raise ValueError(
f"tag arguments type must be string, got {type(tag_).__name__}"
)
tags.add(tag_)
sweep.tags = tags
return sweep
else:
raise ValueError(
f"Last argument to tag() must be a choice(), range() or interval(), got {type(sweep).__name__}"
)
def shuffle(
*args: Union[ElementType, ChoiceSweep, RangeSweep],
sweep: Optional[Union[ChoiceSweep, RangeSweep]] = None,
list: Optional[List[Any]] = None,
) -> Union[List[Any], ChoiceSweep, RangeSweep]:
"""
Shuffle input list or sweep (does not support interval)
"""
if list is not None:
return shuffle(list)
if sweep is not None:
return shuffle(sweep)
if len(args) == 1:
arg = args[0]
if isinstance(arg, (ChoiceSweep, RangeSweep)):
sweep = copy(arg)
sweep.shuffle = True
return sweep
if isinstance(arg, builtins.list):
lst = copy(arg)
random.shuffle(lst)
return lst
else:
return [arg]
else:
simple_choice = _list_to_simple_choice(*args)
simple_choice.shuffle = True
return simple_choice
def sort(
*args: Union[ElementType, ChoiceSweep, RangeSweep],
sweep: Optional[Union[ChoiceSweep, RangeSweep]] = None,
list: Optional[List[Any]] = None,
reverse: bool = False,
) -> Any:
"""
Sort an input list or sweep.
reverse=True reverses the order
"""
if list is not None:
return sort(list, reverse=reverse)
if sweep is not None:
return _sort_sweep(sweep, reverse)
if len(args) == 1:
arg = args[0]
if isinstance(arg, (ChoiceSweep, RangeSweep)):
# choice: sort(choice(a,b,c))
# range: sort(range(1,10))
return _sort_sweep(arg, reverse)
elif isinstance(arg, builtins.list):
return sorted(arg, reverse=reverse)
elif is_type_matching(arg, ParsedElementType):
return arg
else:
raise TypeError(f"Invalid arguments: {args}")
else:
primitives = (int, float, bool, str)
for arg in args:
if not isinstance(arg, primitives):
raise TypeError(f"Invalid arguments: {args}")
if len(args) == 0:
raise ValueError("empty sort input")
elif len(args) > 1:
cw = _list_to_simple_choice(*args)
return _sort_sweep(cw, reverse)
def _sort_sweep(
sweep: Union[ChoiceSweep, RangeSweep], reverse: bool
) -> Union[ChoiceSweep, RangeSweep]:
sweep = copy(sweep)
if isinstance(sweep, ChoiceSweep):
sweep.list = sorted(sweep.list, reverse=reverse)
return sweep
elif isinstance(sweep, RangeSweep):
assert sweep.start is not None
assert sweep.stop is not None
if not reverse:
# ascending
if sweep.start > sweep.stop:
start = sweep.stop + abs(sweep.step)
stop = sweep.start + abs(sweep.step)
sweep.start = start
sweep.stop = stop
sweep.step = -sweep.step
else:
# descending
if sweep.start < sweep.stop:
start = sweep.stop - abs(sweep.step)
stop = sweep.start - abs(sweep.step)
sweep.start = start
sweep.stop = stop
sweep.step = -sweep.step
return sweep
else:
assert False
def glob(
include: Union[List[str], str], exclude: Optional[Union[List[str], str]] = None
) -> Glob:
"""
A glob selects from all options in the config group.
    Inputs are in glob format, e.g. *, foo*, *foo.
:param include: a string or a list of strings to use as include globs
:param exclude: a string or a list of strings to use as exclude globs
"""
if isinstance(include, str):
include = [include]
if exclude is None:
exclude = []
elif isinstance(exclude, str):
exclude = [exclude]
return Glob(include=include, exclude=exclude)
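# Worked example (added for illustration, using the Glob type imported above):
#   glob("foo*")                    -> Glob(include=["foo*"], exclude=[])
#   glob(["*"], exclude="db_mysql") -> Glob(include=["*"], exclude=["db_mysql"])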
| 32.424157 | 107 | 0.628 |
c48f4943f874b8dbbf6f1f4472318dff456de758
| 7,161 |
py
|
Python
|
tri_loss/model/ShuffleNetV2.py
|
mbaharan/person-reid
|
7caea3a1ab2c440c9a5e20a66633f2e3fcd065b5
|
[
"BSD-3-Clause"
] | 12 |
2019-05-10T09:56:39.000Z
|
2021-08-09T03:42:28.000Z
|
tri_loss/model/ShuffleNetV2.py
|
mbaharan/person-reid
|
7caea3a1ab2c440c9a5e20a66633f2e3fcd065b5
|
[
"BSD-3-Clause"
] | 1 |
2020-02-03T13:50:06.000Z
|
2020-02-03T13:50:06.000Z
|
tri_loss/model/ShuffleNetV2.py
|
mbaharan/person-reid
|
7caea3a1ab2c440c9a5e20a66633f2e3fcd065b5
|
[
"BSD-3-Clause"
] | 8 |
2019-10-04T15:23:18.000Z
|
2021-05-08T07:08:09.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import math
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups,
channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
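# Shape sketch for channel_shuffle (added comment, illustrative only): with
# groups=2 and input channels [c0, c1, c2, c3], the view/transpose/flatten
# sequence above reorders them to [c0, c2, c1, c3] -- channels from different
# groups are interleaved while the overall (N, C, H, W) shape is unchanged.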
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, benchmodel):
super(InvertedResidual, self).__init__()
self.benchmodel = benchmodel
self.stride = stride
assert stride in [1, 2]
oup_inc = oup//2
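        # (Added note) benchmodel == 1 is the stride-1 unit: forward() splits
        # the input in half along the channel axis and only one half runs
        # through banch2. benchmodel == 2 is the downsampling unit: the full
        # input feeds both banch1 and banch2 and their outputs are concatenated.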
if self.benchmodel == 1:
#assert inp == oup_inc
self.banch2 = nn.Sequential(
# pw
nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
nn.ReLU(inplace=True),
# dw
nn.Conv2d(oup_inc, oup_inc, 3, stride, 1, groups=oup_inc, bias=False),
nn.BatchNorm2d(oup_inc),
# pw-linear
nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
nn.ReLU(inplace=True),
)
else:
self.banch1 = nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
# pw-linear
nn.Conv2d(inp, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
nn.ReLU(inplace=True),
)
self.banch2 = nn.Sequential(
# pw
nn.Conv2d(inp, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
nn.ReLU(inplace=True),
# dw
nn.Conv2d(oup_inc, oup_inc, 3, stride, 1, groups=oup_inc, bias=False),
nn.BatchNorm2d(oup_inc),
# pw-linear
nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
nn.ReLU(inplace=True),
)
@staticmethod
def _concat(x, out):
# concatenate along channel axis
return torch.cat((x, out), 1)
def forward(self, x):
if 1==self.benchmodel:
x1 = x[:, :(x.shape[1]//2), :, :]
x2 = x[:, (x.shape[1]//2):, :, :]
out = self._concat(x1, self.banch2(x2))
elif 2==self.benchmodel:
out = self._concat(self.banch1(x), self.banch2(x))
return channel_shuffle(out, 2)
class ShuffleNetV2(nn.Module):
def __init__(self, n_class=1000, input_size=224, width_mult=1.):
super(ShuffleNetV2, self).__init__()
assert input_size % 32 == 0
self.stage_repeats = [4, 8, 4]
# index 0 is invalid and should never be called.
# only used for indexing convenience.
if width_mult == 0.5:
self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif width_mult == 1.0:
self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif width_mult == 1.5:
self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif width_mult == 2.0:
self.stage_out_channels = [-1, 24, 224, 488, 976, 2048]
else:
            raise ValueError(
                "width_mult {} is not supported for ShuffleNetV2".format(width_mult))
# building first layer
input_channel = self.stage_out_channels[1]
self.conv1 = conv_bn(3, input_channel, 2)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.features = []
# building inverted residual blocks
for idxstage in range(len(self.stage_repeats)):
numrepeat = self.stage_repeats[idxstage]
output_channel = self.stage_out_channels[idxstage+2]
for i in range(numrepeat):
if i == 0:
#inp, oup, stride, benchmodel):
self.features.append(InvertedResidual(input_channel, output_channel, 2, 2))
else:
self.features.append(InvertedResidual(input_channel, output_channel, 1, 1))
input_channel = output_channel
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building last several layers
self.conv_last = conv_1x1_bn(input_channel, self.stage_out_channels[-1])
self.globalpool = nn.Sequential(nn.AvgPool2d(int(input_size/32)))
# building classifier
#self.classifier = nn.Sequential(nn.Linear(self.stage_out_channels[-1], n_class))
def forward(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.features(x)
x = self.conv_last(x)
x = self.globalpool(x)
# x = x.view(-1, self.stage_out_channels[-1])
# x = self.classifier(x)
return x
def remove_fc(state_dict):
"""Remove the fc layer parameters from state_dict."""
for k in list(state_dict.keys()):
if k.startswith('classifier'):
del state_dict[k]
'''
for key, value in state_dict.items():
if key.startswith('classifier'):
del state_dict[key]
'''
return state_dict
class ShuffleNetFeature(nn.Module):
def __init__(self, shuffleNet):
super(ShuffleNetFeature, self).__init__()
self.shuffleNet = shuffleNet
def forward(self, x1):
output = self.shuffleNet(x1)
output = output.view(output.size(0), -1)
return output
def shufflenetFeature(model):
model = ShuffleNetFeature(model)
return model
def shufflenetv2(width_mult=2., pretrained=False, path_to_predefined_model=''):
model = ShuffleNetV2(width_mult=width_mult)
if pretrained:
import os
if os.path.isfile(path_to_predefined_model):
print("=> Loading model at'{}'".format(path_to_predefined_model))
            model.load_state_dict(remove_fc(torch.load(path_to_predefined_model))) # Remove the classifier from the model.
print(model)
#input("We are using a pre-trained network.")
else:
print("=> No model found at '{}'".format(path_to_predefined_model))
quit()
return model
if __name__ == "__main__":
"""Testing
"""
model = ShuffleNetV2()
print(model)
| 32.402715 | 118 | 0.557604 |
2690c7b93af744b56685883982d249f706bb6fff
| 401 |
py
|
Python
|
envdsys/envnet/migrations/0016_daqregistration_config2.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 1 |
2021-11-06T19:22:53.000Z
|
2021-11-06T19:22:53.000Z
|
envdsys/envnet/migrations/0016_daqregistration_config2.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 25 |
2019-06-18T20:40:36.000Z
|
2021-07-23T20:56:48.000Z
|
envdsys/envnet/migrations/0016_daqregistration_config2.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-14 23:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envnet', '0015_auto_20210223_0034'),
]
operations = [
migrations.AddField(
model_name='daqregistration',
name='config2',
field=models.JSONField(blank=True, null=True),
),
]
| 21.105263 | 58 | 0.605985 |
56b7ff7a65f9cbd432cccaada4987e91b6859e51
| 1,099 |
py
|
Python
|
Python/max-points-on-a-line.py
|
black-shadows/LeetCode-Solutions
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
[
"Fair",
"Unlicense"
] | null | null | null |
Python/max-points-on-a-line.py
|
black-shadows/LeetCode-Solutions
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
[
"Fair",
"Unlicense"
] | null | null | null |
Python/max-points-on-a-line.py
|
black-shadows/LeetCode-Solutions
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
[
"Fair",
"Unlicense"
] | null | null | null |
# Time: O(n^2)
# Space: O(n)
import collections
# Definition for a point
class Point(object):
def __init__(self, a=0, b=0):
self.x = a
self.y = b
class Solution(object):
def maxPoints(self, points):
"""
:type points: List[Point]
:rtype: int
"""
max_points = 0
for i, start in enumerate(points):
slope_count, same = collections.defaultdict(int), 1
for j in xrange(i + 1, len(points)):
end = points[j]
if start.x == end.x and start.y == end.y:
same += 1
else:
slope = float("inf")
if start.x - end.x != 0:
slope = (start.y - end.y) * 1.0 / (start.x - end.x)
slope_count[slope] += 1
current_max = same
for slope in slope_count:
current_max = max(current_max, slope_count[slope] + same)
max_points = max(max_points, current_max)
return max_points
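# Example (added for illustration): for points (1,1), (2,2), (3,3) every pair
# seen from any anchor has slope 1.0, so slope_count[1.0] + same = 3 and
# maxPoints returns 3.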
| 27.475 | 76 | 0.464968 |
df23e41f51c16e0d0e0629178c103699ffbc36c1
| 7,525 |
py
|
Python
|
deeplearning/overthinking_experiments.py
|
yuuu14/multi-exit-network
|
eabf7b23fe93e261a5350ed03075dcb49fdeb0f8
|
[
"MIT"
] | null | null | null |
deeplearning/overthinking_experiments.py
|
yuuu14/multi-exit-network
|
eabf7b23fe93e261a5350ed03075dcb49fdeb0f8
|
[
"MIT"
] | null | null | null |
deeplearning/overthinking_experiments.py
|
yuuu14/multi-exit-network
|
eabf7b23fe93e261a5350ed03075dcb49fdeb0f8
|
[
"MIT"
] | null | null | null |
# overthinking_experiments.py
# runs the experiments in section 4
# quantifies the wasteful and destructive effects
# produces the images to explain these effects
import torch
import torchvision.utils
import numpy as np
import pprint
import os
import time
from shutil import copyfile
from collections import Counter
from deeplearning import aux_funcs as af
from deeplearning import model_funcs as mf
from deeplearning import network_architectures as arcs
# To quantify the wasteful effect of overthinking
def wasteful_overthinking_experiment(models_path, device='cpu'):
#task = 'cifar10'
#task = 'cifar100'
task = 'tinyimagenet'
network = 'vgg16bn'
#network = 'resnet56'
#network = 'wideresnet32_4'
#network = 'mobilenet'
sdn_name = task + '_' + network + '_sdn_ic_only'
sdn_model, sdn_params = arcs.load_model(models_path, sdn_name, epoch=-1)
sdn_model.to(device)
dataset = af.get_dataset(sdn_params['task'])
top1_test, top5_test = mf.sdn_test(sdn_model, dataset.test_loader, device)
print('Top1 Test accuracy: {}'.format(top1_test))
print('Top5 Test accuracy: {}'.format(top5_test))
layer_correct, _, _, _ = mf.sdn_get_detailed_results(sdn_model, loader=dataset.test_loader, device=device)
layers = sorted(list(layer_correct.keys()))
end_correct = layer_correct[layers[-1]]
total = 10000
# to quantify the computational waste
c_i = [0.15, 0.3, 0.45, 0.6, 0.75, 0.9]
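    # (Added note; hedged reading of the loop below) c_i appears to hold the
    # relative cost of exiting at internal classifier i, so inputs first
    # answered correctly at IC i are charged c_i, while anything not already
    # resolved before the final output is charged the full network cost.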
total_comp = 0
cum_correct = set()
for layer in layers:
cur_correct = layer_correct[layer]
unique_correct = cur_correct - cum_correct
cum_correct = cum_correct | cur_correct
print('Output: {}'.format(layer))
print('Current correct: {}'.format(len(cur_correct)))
print('Cumulative correct: {}'.format(len(cum_correct)))
print('Unique correct: {}\n'.format(len(unique_correct)))
if layer < layers[-1]:
total_comp += len(unique_correct) * c_i[layer]
else:
total_comp += total - (len(cum_correct) - len(unique_correct))
print('Total Comp: {}'.format(total_comp))
# to explain the wasteful effect
def get_simple_complex(models_path, device='cpu'):
sdn_name = 'tinyimagenet_vgg16bn_sdn_ic_only'
sdn_model, sdn_params = arcs.load_model(models_path, sdn_name, epoch=-1)
sdn_model.to(device)
dataset = af.get_dataset(sdn_params['task'])
output_path = 'simple_complex_images'
af.create_path(output_path)
dog_path = output_path+'/'+'dog'
cat_path = output_path+'/'+'cat'
af.create_path(dog_path)
af.create_path(cat_path)
# n02099601 dog 26
# n02123394 cat 31
layer_correct, layer_wrong, _, _ = mf.sdn_get_detailed_results(sdn_model, loader=dataset.test_loader, device=device)
layers = sorted(list(layer_correct.keys()))
wrong_until = layer_wrong[layers[0]] | layer_correct[layers[0]]
for layer in layers[:-1]:
instances = layer_correct[layer] & wrong_until
wrong_until = wrong_until - layer_correct[layer]
print('IC: {}, Num images: {}'.format(layer, len(instances)))
for instance_id in instances:
instance_path = dataset.testset_paths.imgs[instance_id][0]
filename = '{}_{}'.format(layer, os.path.basename(instance_path))
if 'n02099601' in instance_path:
copyfile(instance_path, dog_path+'/'+filename)
if 'n02123394' in instance_path:
copyfile(instance_path, cat_path+'/'+filename)
# To quantify the destructive effects of overthinking
def destructive_overthinking_experiment(models_path, device='cpu'):
#sdn_name = 'cifar10_vgg16bn_bd_sdn_converted'; add_trigger = True # for the backdoored network
add_trigger = False
#task = 'cifar10'
#task = 'cifar100'
task = 'tinyimagenet'
network = 'vgg16bn'
#network = 'resnet56'
#network = 'wideresnet32_4'
#network = 'mobilenet'
sdn_name = task + '_' + network + '_sdn_ic_only'
sdn_model, sdn_params = arcs.load_model(models_path, sdn_name, epoch=-1)
sdn_model.to(device)
dataset = af.get_dataset(sdn_params['task'], add_trigger=add_trigger)
top1_test, top5_test = mf.sdn_test(sdn_model, dataset.test_loader, device)
print('Top1 Test accuracy: {}'.format(top1_test))
print('Top5 Test accuracy: {}'.format(top5_test))
layer_correct, layer_wrong, _, layer_confidence = mf.sdn_get_detailed_results(sdn_model, loader=dataset.test_loader, device=device)
layers = sorted(list(layer_correct.keys()))
end_wrong = layer_wrong[layers[-1]]
cum_correct = set()
for layer in layers:
cur_correct = layer_correct[layer]
cum_correct = cum_correct | cur_correct
cur_overthinking = cur_correct & end_wrong
print('Output: {}'.format(layer))
print('Current correct: {}'.format(len(cur_correct)))
print('Cumulative correct: {}'.format(len(cum_correct)))
print('Cur cat. overthinking: {}\n'.format(len(cur_overthinking)))
total_confidence = 0.0
for instance in cur_overthinking:
total_confidence += layer_confidence[layer][instance]
print('Average confidence on destructive overthinking instances:{}'.format(total_confidence/(0.1 + len(cur_overthinking))))
total_confidence = 0.0
for instance in cur_correct:
total_confidence += layer_confidence[layer][instance]
print('Average confidence on correctly classified :{}'.format(total_confidence/(0.1 + len(cur_correct))))
# to explain the destructive effect
def get_destructive_overthinking_samples(models_path, device='cpu'):
sdn_name = 'tinyimagenet_vgg16bn_sdn_ic_only'
sdn_model, sdn_params = arcs.load_model(models_path, sdn_name, epoch=-1)
sdn_model.to(device)
dataset = af.get_dataset(sdn_params['task'])
output_path = 'only_first'
af.create_path(output_path)
layer_correct, layer_wrong, layer_predictions, _ = mf.sdn_get_detailed_results(sdn_model, loader=dataset.test_loader, device=device)
layers = sorted(list(layer_correct.keys()))
all_correct = set()
for layer in layers[1:]:
all_correct = all_correct | layer_correct[layer]
only_first = layer_correct[layers[0]] - all_correct
for instance_id in only_first:
instance_path = dataset.testset_paths.imgs[instance_id][0]
filename = os.path.basename(instance_path)
print(instance_path)
first_predict = layer_predictions[0][instance_id][0]
last_predict = layer_predictions[layers[-1]][instance_id][0]
first_predict = dataset.testset_paths.classes[first_predict]
last_predict = dataset.testset_paths.classes[last_predict]
filename = '{}_{}_{}'.format(first_predict, last_predict, filename)
copyfile(instance_path, output_path+'/'+filename)
def main():
torch.manual_seed(af.get_random_seed()) # reproducible
np.random.seed(af.get_random_seed())
device = af.get_pytorch_device()
trained_models_path = 'networks/{}'.format(af.get_random_seed())
wasteful_overthinking_experiment(trained_models_path, device)
get_simple_complex(trained_models_path, device)
destructive_overthinking_experiment(trained_models_path, device)
get_destructive_overthinking_samples(trained_models_path, device)
if __name__ == '__main__':
main()
| 37.625 | 136 | 0.689701 |
ff27c896686a2b0c4bf352e79868d6180f3f7ff2
| 6,993 |
py
|
Python
|
src/htmlparser/fsm_config.py
|
greenius/ctemplate
|
2c37d13940c555abb7eb9932a5ab428a99ff6db4
|
[
"BSD-3-Clause"
] | 6 |
2016-01-27T18:01:52.000Z
|
2019-09-10T02:27:09.000Z
|
src/htmlparser/fsm_config.py
|
greenius/ctemplate
|
2c37d13940c555abb7eb9932a5ab428a99ff6db4
|
[
"BSD-3-Clause"
] | 2 |
2019-01-14T01:35:28.000Z
|
2019-02-03T08:32:51.000Z
|
src/htmlparser/fsm_config.py
|
greenius/ctemplate
|
2c37d13940c555abb7eb9932a5ab428a99ff6db4
|
[
"BSD-3-Clause"
] | 6 |
2016-10-27T22:32:28.000Z
|
2019-09-03T16:56:35.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---
#
# Create a state machine object based on a definition file.
#
__author__ = '[email protected] (Filipe Almeida)'
class OrderedDict:
"""Ordered dictionary implementation."""
# Define the minimum functionality we need for our application.
  # Easier would be to subclass from UserDict.DictMixin, and only
# define __getitem__, __setitem__, __delitem__, and keys, but that's
# not as portable. We don't need to define much more, so we just do.
def __init__(self):
self._dict = {}
self._keys = []
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __delitem__(self, key):
self._keys.remove(key)
del self._dict[key]
def keys(self):
return self._keys
# Below are all we have to define in addition to what DictMixin would need
def __len__(self):
return len(self.keys())
def __contains__(self, key):
    return key in self._dict
def __iter__(self):
# It's not as portable -- though it would be more space-efficient -- to do
# for k in self.keys(): yield k
return iter(self.keys())
class State(object):
"""Contains information about a specific state."""
def __init__(self):
pass
name = None
external_name = None
transitions = []
class Transition(object):
"""Contains information about a specific transition."""
def __init__(self, condition, source, destination):
self.condition = condition
self.source = source
self.destination = destination
class FSMConfig(object):
"""Container for the statemachine definition."""
sm = {} # dictionary that contains the finite state machine definition
# loaded from a config file.
transitions = [] # List of transitions.
conditions = {} # Mapping between the condition name and the bracket
# expression.
states = OrderedDict() # Ordered dictionary of states.
name = None
comment = None
def AddState(self, **dic):
"""Called from the definition file with the description of the state.
Receives a dictionary and populates internal structures based on it. The
dictionary is in the following format:
{'name': state_name,
'external': exposed state name,
'transitions': [
[condition, destination_state ],
[condition, destination_state ]
]
}
"""
state = State()
state.name = dic['name']
state.external_name = dic['external']
state_transitions = []
for (condition, destination) in dic['transitions']:
transition = Transition(condition, state.name, destination)
state_transitions.append(transition)
self.transitions.extend(state_transitions)
state.transitions = state_transitions
self.states[state.name] = state
def AddCondition(self, name, expression):
"""Called from the definition file with the definition of a condition.
    Receives the name of the condition and its expression.
"""
self.conditions[name] = expression
def Load(self, filename):
"""Load the state machine definition file.
In the definition file, which is based on the python syntax, the following
variables and functions are defined.
name: Name of the state machine
comment: Comment line on the generated file.
condition(): A mapping between condition names and bracket expressions.
    state(): Defines a state and its transitions. It accepts the following
attributes:
name: name of the state
external: exported name of the state. The exported name can be used
multiple times in order to create a super state.
transitions: List of pairs containing the condition for the transition
and the destination state. Transitions are ordered so if
a default rule is used, it must be the last one in the list.
Example:
name = 'c comment parser'
condition('/', '/')
condition('*', '*')
condition('linefeed', '\\n')
condition('default', '[:default:]')
state(name = 'text',
external = 'comment',
transitions = [
[ '/', 'comment_start' ],
[ 'default', 'text' ]
])
state(name = 'comment_start',
external = 'comment',
transitions = [
[ '/', 'comment_line' ],
[ '*', 'comment_multiline' ],
[ 'default', 'text' ]
])
state(name = 'comment_line',
external = 'comment',
transitions = [
[ 'linefeed', 'text' ],
[ 'default', 'comment_line' ]
])
state(name = 'comment_multiline',
external = 'comment',
transitions = [
[ '*', 'comment_multiline_close' ],
[ 'default', 'comment_multiline' ]
])
state(name = 'comment_multiline_close',
external = 'comment',
transitions = [
[ '/', 'text' ],
[ 'default', 'comment_multiline' ]
])
"""
self.sm['state'] = self.AddState
self.sm['condition'] = self.AddCondition
execfile(filename, self.sm)
self.name = self.sm['name']
if not self.name.isalnum():
raise Exception("State machine name must consist of only alphanumeric"
"characters.")
self.comment = self.sm['comment']
def __init__(self):
pass
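# --- Illustrative usage sketch (editor's addition, not part of the original
# ctemplate source). 'example_fsm.config' is a hypothetical definition file
# written in the format documented in FSMConfig.Load() above.
if __name__ == '__main__':
  config = FSMConfig()
  config.Load('example_fsm.config')
  for name in config.states:
    st = config.states[name]
    print('%s (%s): %d transitions' % (name, st.external_name,
                                       len(st.transitions)))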
| 31.5 | 79 | 0.660518 |
6c339f4312de4a2a7b18527e4fb0f0a0591eef19
| 3,398 |
py
|
Python
|
Validation/HGCalValidation/python/HGCalValidator_cfi.py
|
ryanm124/cmssw
|
9c955a497467f8cc57a7cc1eefc39d41d2d3d89e
|
[
"Apache-2.0"
] | 13 |
2015-11-30T15:49:45.000Z
|
2022-02-08T16:11:30.000Z
|
Validation/HGCalValidation/python/HGCalValidator_cfi.py
|
ryanm124/cmssw
|
9c955a497467f8cc57a7cc1eefc39d41d2d3d89e
|
[
"Apache-2.0"
] | 640 |
2015-02-11T18:55:47.000Z
|
2022-03-31T14:12:23.000Z
|
Validation/HGCalValidation/python/HGCalValidator_cfi.py
|
ryanm124/cmssw
|
9c955a497467f8cc57a7cc1eefc39d41d2d3d89e
|
[
"Apache-2.0"
] | 51 |
2015-08-11T21:01:40.000Z
|
2022-03-30T07:31:34.000Z
|
import FWCore.ParameterSet.Config as cms
from Validation.HGCalValidation.CaloParticleSelectionForEfficiency_cfi import *
from Validation.HGCalValidation.HGVHistoProducerAlgoBlock_cfi import *
from SimCalorimetry.HGCalAssociatorProducers.LCToCPAssociation_cfi import layerClusterCaloParticleAssociation
from SimCalorimetry.HGCalAssociatorProducers.LCToSCAssociation_cfi import layerClusterSimClusterAssociation
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
from RecoHGCal.TICL.iterativeTICL_cff import ticlIterLabels, ticlIterLabelsMerge
labelTst = [cms.InputTag("ticlTracksters"+iteration) for iteration in ticlIterLabelsMerge]
labelTst.extend(["ticlSimTracksters"])
lcInputMask = [cms.InputTag("ticlTracksters"+iteration) for iteration in ticlIterLabels]
lcInputMask.extend(["ticlSimTracksters"])
hgcalValidator = DQMEDAnalyzer(
"HGCalValidator",
### general settings ###
# selection of CP for evaluation of efficiency #
CaloParticleSelectionForEfficiency,
### reco input configuration ###
#2DLayerClusters, PFClusters, Tracksters
label_lcl = layerClusterCaloParticleAssociation.label_lc,
label_tst = cms.VInputTag(labelTst),
associator = cms.untracked.InputTag("layerClusterCaloParticleAssociationProducer"),
associatorSim = cms.untracked.InputTag("layerClusterSimClusterAssociationProducer"),
#General info on layers etc.
SaveGeneralInfo = cms.untracked.bool(True),
#CaloParticle related plots
doCaloParticlePlots = cms.untracked.bool(True),
#Select caloParticles for efficiency or pass through
doCaloParticleSelection = cms.untracked.bool(True),
#SimCluster related plots
doSimClustersPlots = cms.untracked.bool(True),
#Layer Cluster related plots
doLayerClustersPlots = cms.untracked.bool(True),
#Trackster related plots
doTrackstersPlots = cms.untracked.bool(True),
#The cumulative material budget in front of each layer. To be more specific, it
#is the material budget just in front of the active material (not including it).
#This file is created using the official material budget code.
cummatbudinxo = cms.FileInPath('Validation/HGCalValidation/data/D41.cumulative.xo'),
### sim input configuration ###
label_cp_effic = layerClusterCaloParticleAssociation.label_cp,
label_cp_fake = cms.InputTag("mix","MergedCaloTruth"),
#simClusters
label_scl = layerClusterSimClusterAssociation.label_scl,
simVertices = cms.InputTag("g4SimHits"),
LayerClustersInputMask = cms.VInputTag(lcInputMask),
#Total number of layers of HGCal that we want to monitor
#Could get this also from HGCalImagingAlgo::maxlayer but better to get it from here
totallayers_to_monitor = cms.int32(52),
#Thicknesses we want to monitor. -1 is for scintillator
thicknesses_to_monitor = cms.vint32(120,200,300,-1),
# HistoProducerAlgo. Defines the set of plots to be booked and filled
histoProducerAlgoBlock = HGVHistoProducerAlgoBlock,
### output configuration
dirName = cms.string('HGCAL/HGCalValidator/')
)
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
premix_stage2.toModify(hgcalValidator,
label_cp_fake = "mixData:MergedCaloTruth"
)
from Configuration.Eras.Modifier_phase2_hgcalV10_cff import phase2_hgcalV10
phase2_hgcalV10.toModify(hgcalValidator, totallayers_to_monitor = cms.int32(50))
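# --- Illustrative sketch (editor's addition, not part of the CMSSW release):
# a downstream config could derive a customised copy with the standard clone()
# idiom, complementing the toModify() calls above. The parameter values below
# are placeholders only.
hgcalValidatorCustom = hgcalValidator.clone(
    dirName = 'HGCAL/HGCalValidatorCustom/',
    doTrackstersPlots = False
)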
| 41.439024 | 109 | 0.791054 |
768b2b209ab18ecc8d925bdc2064b920145939fb
| 2,833 |
py
|
Python
|
inpaint/model/transformer/transformer_method.py
|
Janspiry/A-Demo-for-Image-Inpainting-by-React
|
e1951027d10677a1fd4463b05c7acf473481decb
|
[
"MIT"
] | 2 |
2021-12-07T03:13:32.000Z
|
2021-12-26T04:44:40.000Z
|
inpaint/model/transformer/transformer_method.py
|
Janspiry/A-Demo-for-Image-Inpainting-by-React
|
e1951027d10677a1fd4463b05c7acf473481decb
|
[
"MIT"
] | null | null | null |
inpaint/model/transformer/transformer_method.py
|
Janspiry/A-Demo-for-Image-Inpainting-by-React
|
e1951027d10677a1fd4463b05c7acf473481decb
|
[
"MIT"
] | null | null | null |
from .transformer import TransformerLayer
from .transformer_withmask import TransformerLayer as Mask_TransformerLayer
from .swin_transformer import TransformerModule as Swin_TransformerLayer
import torch
import torch.nn as nn
class TransformerBlock(nn.Module):
def __init__(self, size, patch_size, MiniTransFormer=None, use_local=False, use_global=False):
super(TransformerBlock, self).__init__()
model_dim = 256
num_layers = 6
num_heads = 8
ffn_dim = 512
self.p = patch_size # patch_size
if MiniTransFormer is not None:
model_dim, num_layers, num_heads, ffn_dim = MiniTransFormer
self.transformer_global = TransformerLayer(
size=size,
patch_size=patch_size,
MiniTransFormer=MiniTransFormer
)
self.transformer_local = Swin_TransformerLayer(
in_channels=size[0],
hidden_dimension=model_dim,
layers=2,
patch_size=patch_size,
num_heads=num_heads,
window_size=8,
relative_pos_embedding=True
)
self.use_local = use_local
self.use_global = use_global
assert self.use_local or self.use_global, 'self.use_local and self.use_global are false.'
def forward(self, x):
b, c, h, w = x.size()
if(self.use_global):
x = self.transformer_global(x)
if(self.use_local):
x = self.transformer_local(x)
return x
class MaskTransformer(nn.Module):
def __init__(self, size, patch_size, MiniTransFormer=None, use_local=False, use_global=False):
super(MaskTransformer, self).__init__()
model_dim = 256
num_layers = 6
num_heads = 8
ffn_dim = 512
self.p = patch_size # patch_size
if MiniTransFormer is not None:
model_dim, num_layers, num_heads, ffn_dim = MiniTransFormer
self.transformer_global = Mask_TransformerLayer(
size=size,
patch_size=patch_size,
MiniTransFormer=MiniTransFormer
)
self.transformer_local = Swin_TransformerLayer(
in_channels=size[0],
hidden_dimension=model_dim,
layers=2,
patch_size=patch_size,
num_heads=num_heads,
window_size=8,
relative_pos_embedding=True
)
self.use_local = use_local
self.use_global = use_global
assert self.use_local or self.use_global, 'self.use_local and self.use_global are false.'
def forward(self, x, masks):
b, c, h, w = x.size()
if(self.use_global):
x = self.transformer_global(x, masks=masks)
if(self.use_local):
x = self.transformer_local(x)
return x
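# --- Illustrative usage sketch (editor's addition). The (channels, height,
# width) meaning of `size`, the patch size, and the dummy shapes below are
# assumptions inferred from the code above; running it requires the sibling
# transformer modules imported at the top of this file.
def _example_usage():
    block = TransformerBlock(size=(256, 32, 32), patch_size=4,
                             use_local=True, use_global=True)
    x = torch.randn(1, 256, 32, 32)  # (batch, channels, height, width)
    return block(x)                  # forward() preserves the input layout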
| 35.4125 | 98 | 0.622661 |
f413e834b4b1996e62cfc16774efbd53c949c33a
| 10,696 |
py
|
Python
|
scripts/NavPy/demo_c172.py
|
AuraUAS/aura-core
|
4711521074db72ba9089213e14455d89dc5306c0
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 8 |
2016-08-03T19:35:03.000Z
|
2019-12-15T06:25:05.000Z
|
scripts/NavPy/demo_c172.py
|
jarilq/aura-core
|
7880ed265396bf8c89b783835853328e6d7d1589
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 4 |
2018-09-27T15:48:56.000Z
|
2018-11-05T12:38:10.000Z
|
scripts/NavPy/demo_c172.py
|
jarilq/aura-core
|
7880ed265396bf8c89b783835853328e6d7d1589
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5 |
2017-06-28T19:15:36.000Z
|
2020-02-19T19:31:24.000Z
|
from loading_mat_data import load_struct
import numpy as np
import matplotlib.pyplot as plt
import insgps_quat_15state
import navpy
np.set_printoptions(precision=5,suppress=True)
plt.close()
# >>> UMN UAV Flight Data
#path = '/Users/adhika/Dropbox/python_code/SysID/data/'
#fname = 'ibis_flight05_doublets_2013_07_07_cal.mat'
#fname = 'thor_flight91_doublet_claw_2013_07_07.mat'
# <<<
# >>> Flight Gear Simulation Data
path = ''
fname = 'data/C172_10042012mod.mat'
# <<<
flight_data, flight_info = load_struct(path+fname)
# ============================ NOISE CONFIGURATION =============================
# --- Process Noise
# White Noise Part
#sig_w_ax = 0.3
#sig_w_ay = 0.3
#sig_w_az = 0.3
#sig_w_gx = np.deg2rad(0.3)
#sig_w_gy = np.deg2rad(0.3)
#sig_w_gz = np.deg2rad(0.3)
# Time-correlated Part
#sig_a_d = 5e-3*9.81
#tau_a = 100.0
#sig_g_d = np.deg2rad(0.05)
#tau_g = 50.0
#Rw = np.diag([sig_w_ax**2, sig_w_ay**2, sig_w_az**2,
# sig_w_gx**2, sig_w_gy**2, sig_w_gz**2,
# 2*sig_a_d**2/tau_a, 2*sig_a_d**2/tau_a, 2*sig_a_d**2/tau_a,
# 2*sig_g_d**2/tau_g, 2*sig_g_d**2/tau_g, 2*sig_g_d**2/tau_g])
# --- Measurement Noise
#sig_gps_p_ne = 3;
#sig_gps_p_d = 5;
#sig_gps_v = 0.5; # Inflated, GPS antennas are located off CG and not compensated
#R = np.diag([sig_gps_p_ne**2, sig_gps_p_ne**2, sig_gps_p_d**2,
# sig_gps_v**2, sig_gps_v**2, sig_gps_v**2])
# =========================== PLACEHOLDERS ===============================
drl = len(flight_data.time)
estPOS = np.nan*np.ones((drl,3))
estVEL = np.nan*np.ones((drl,3))
estATT = np.nan*np.ones((drl,4))
estAB = np.nan*np.ones((drl,3))
estGB = np.nan*np.ones((drl,3))
Pp = np.nan*np.ones((drl,3))
Pvel = np.nan*np.ones((drl,3))
Patt = np.nan*np.ones((drl,3))
Pab = np.nan*np.ones((drl,3))
Pgb = np.nan*np.ones((drl,3))
stateInnov = np.nan*np.ones((drl,6))
# ============================ VARIABLE INITIALIZER ============================
# moved to init(): H = np.hstack( (np.eye(6), np.zeros((6,9))) )
# moved to init(): NAV_INIT = False
# moved to init(): IMU_CAL_INIT = False
# moved to init(): TU_COUNT = 0
idx_init = []
# moved to init(): tcpu = -1.0; old_tcpu = -1.0
# moved to init(): tow = -1.0; old_tow = -1.0
# =============================== MAIN LOOP ====================================
filter = insgps_quat_15state.Filter()
nav_init = False
for i in range(0,drl):
# prepare the sensor data
imu = insgps_quat_15state.IMU( flight_data.time[i], flight_data.navValid[i],
flight_data.p[i], flight_data.q[i], flight_data.r[i],
flight_data.ax[i], flight_data.ay[i], flight_data.az[i] )
gps = insgps_quat_15state.GPS( flight_data.time[i], flight_data.navValid[i],
flight_data.GPS_TOW[i],
flight_data.lat[i], flight_data.lon[i],
flight_data.alt[i],
flight_data.vn[i], flight_data.ve[i], flight_data.vd[i] )
# update the filter
est = filter.update(imu, gps)
# save the results for plotting
if not nav_init and est.valid:
nav_init = True
idx_init.append(i)
elif not est.valid:
nav_init = False
estPOS[i,:] = est.estPOS[:]
estVEL[i,:] = est.estVEL[:]
estATT[i,:] = est.estATT[:]
estAB[i,:] = est.estAB[:]
estGB[i,:] = est.estGB[:]
Pp[i,:] = np.diag(est.P[0:3,0:3])
Pvel[i,:] = np.diag(est.P[3:6,3:6])
Patt[i,:] = np.diag(est.P[6:9,6:9])
Pab[i,:] = np.diag(est.P[9:12,9:12])
Pgb[i,:] = np.diag(est.P[12:15,12:15])
stateInnov[i,:] = est.stateInnov[:]
psi, theta, phi = navpy.quat2angle(estATT[:,0],estATT[:,1:4],output_unit='deg')
# Calculate Attitude Error
delta_att = np.nan*np.zeros((drl,3))
for i in range(0,drl):
C1 = navpy.angle2dcm(flight_data.psi[i],flight_data.theta[i],flight_data.phi[i]).T
C2 = navpy.angle2dcm(psi[i],theta[i],phi[i],input_unit='deg').T
dC = C2.dot(C1.T)-np.eye(3)
# dC contains delta angle. To match delta quaternion, divide by 2.
delta_att[i,:] = [-dC[1,2]/2.0, dC[0,2]/2.0, -dC[0,1]/2.0]
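# --- Editor's aside (not in the original script): quick numeric check of the
# small-angle relation used above. For a pure roll offset d, the body-to-nav
# DCM error dC = C2*C1' - I is approximately the skew matrix of the rotation
# vector, so -dC[1,2] should recover d to first order (navpy.angle2dcm
# defaults to radians and the ZYX yaw/pitch/roll sequence, as in the loop).
d_chk = np.deg2rad(0.1)
C1_chk = navpy.angle2dcm(0.0, 0.0, 0.0).T
C2_chk = navpy.angle2dcm(0.0, 0.0, d_chk).T
dC_chk = C2_chk.dot(C1_chk.T) - np.eye(3)
print('delta-angle check: %.6f rad vs %.6f rad' % (-dC_chk[1, 2], d_chk))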
lat_ref = estPOS[idx_init[0],0]
lon_ref = estPOS[idx_init[0],1]
alt_ref = estPOS[idx_init[0],2]
ecef_ref = navpy.lla2ecef(lat_ref,lon_ref,alt_ref)
ecef = navpy.lla2ecef(estPOS[:,0],estPOS[:,1],estPOS[:,2])
filter_ned = navpy.ecef2ned(ecef-ecef_ref,lat_ref,lon_ref,alt_ref)
ecef = navpy.lla2ecef(flight_data.lat,flight_data.lon,flight_data.alt)
gnss_ned = navpy.ecef2ned(ecef-ecef_ref,lat_ref,lon_ref,alt_ref)
gnss_ned[0:idx_init[0],:] = np.nan
ecef = navpy.lla2ecef(flight_data.navlat,flight_data.navlon,flight_data.navalt,latlon_unit='rad')
ref_ned = navpy.ecef2ned(ecef-ecef_ref,lat_ref,lon_ref,alt_ref)
ref_ned[0:idx_init[0],:] = np.nan
# ============================= INS PLOTS ======================================
nsig = 3
istart = idx_init[0]
istop = drl
pos_fig, pos_ax = plt.subplots(1)
pos_ax.plot(gnss_ned[:,1],gnss_ned[:,0],'*',label='GNSS')
pos_ax.plot(ref_ned[:,1],ref_ned[:,0],label='Ref')
pos_ax.plot(filter_ned[:,1],filter_ned[:,0],label='Filter')
pos_ax.set_title('Location')
pos_ax.set_ylabel('North (m)')
pos_ax.set_xlabel('East (m)')
pos_ax.legend(loc='best')
pos_ax.set_aspect('equal')
vel_fig, vel_ax = plt.subplots(3,2, sharex=True)
vel_ax[0,0].plot(flight_data.time[istart:istop],flight_data.navvn[istart:istop],label='True')
vel_ax[0,0].plot(flight_data.time[istart:istop],estVEL[istart:istop,0],'r',label='Filter')
vel_ax[0,0].set_ylabel('$V_N$ (m/s)')
vel_ax[0,0].legend()
vel_ax[1,0].plot(flight_data.time[istart:istop],flight_data.navve[istart:istop],label='True')
vel_ax[1,0].plot(flight_data.time[istart:istop],estVEL[istart:istop,1],'r',label='Filter')
vel_ax[1,0].set_ylabel('$V_E$ (m/s)')
vel_ax[2,0].plot(flight_data.time[istart:istop],flight_data.navvd[istart:istop],label='True')
vel_ax[2,0].plot(flight_data.time[istart:istop],estVEL[istart:istop,2],'r',label='Filter')
vel_ax[2,0].set_ylabel('$V_D$ (m/s)')
vel_ax[0,1].plot(flight_data.time[istart:istop],flight_data.navvn[istart:istop]-estVEL[istart:istop,0],'r')
vel_ax[0,1].plot(flight_data.time[istart:istop],nsig*np.sqrt(Pvel[istart:istop,0]),'k')
vel_ax[0,1].plot(flight_data.time[istart:istop],-nsig*np.sqrt(Pvel[istart:istop,0]),'k')
vel_ax[1,1].plot(flight_data.time[istart:istop],flight_data.navve[istart:istop]-estVEL[istart:istop,1],'r')
vel_ax[1,1].plot(flight_data.time[istart:istop],nsig*np.sqrt(Pvel[istart:istop,1]),'k')
vel_ax[1,1].plot(flight_data.time[istart:istop],-nsig*np.sqrt(Pvel[istart:istop,1]),'k')
vel_ax[2,1].plot(flight_data.time[istart:istop],flight_data.navvd[istart:istop]-estVEL[istart:istop,2],'r')
vel_ax[2,1].plot(flight_data.time[istart:istop],nsig*np.sqrt(Pvel[istart:istop,2]),'k')
vel_ax[2,1].plot(flight_data.time[istart:istop],-nsig*np.sqrt(Pvel[istart:istop,2]),'k')
vel_ax[0,0].set_title('Velocity (m/s)')
vel_ax[0,1].set_title('Velocity Error (m/s)')
vel_ax[2,0].set_xlabel('Time (sec)')
vel_ax[2,1].set_xlabel('Time (sec)')
att_fig, att_ax = plt.subplots(3,2, sharex=True)
att_ax[0,0].plot(flight_data.time[istart:istop],np.rad2deg(flight_data.phi[istart:istop]),label='True')
att_ax[0,0].plot(flight_data.time[istart:istop],phi[istart:istop],'r',label='Filter')
att_ax[0,0].set_ylabel(r'$phi$ (deg)')
att_ax[0,0].legend()
att_ax[1,0].plot(flight_data.time[istart:istop],np.rad2deg(flight_data.theta[istart:istop]),label='True')
att_ax[1,0].plot(flight_data.time[istart:istop],theta[istart:istop],'r',label='Filter')
att_ax[1,0].set_ylabel(r'$theta$ (deg)')
att_ax[2,0].plot(flight_data.time[istart:istop],np.rad2deg(flight_data.psi[istart:istop]),label='True')
att_ax[2,0].plot(flight_data.time[istart:istop],psi[istart:istop],'r',label='Filter')
att_ax[2,0].set_ylabel(r'$psi$ (deg)')
att_ax[0,1].plot(flight_data.time[istart:istop],np.rad2deg(delta_att[istart:istop,0]),'r')
att_ax[0,1].plot(flight_data.time[istart:istop],nsig*np.rad2deg(np.sqrt(Patt[istart:istop,0])),'k')
att_ax[0,1].plot(flight_data.time[istart:istop],-nsig*np.rad2deg(np.sqrt(Patt[istart:istop,0])),'k')
att_ax[1,1].plot(flight_data.time[istart:istop],np.rad2deg(delta_att[istart:istop,1]),'r')
att_ax[1,1].plot(flight_data.time[istart:istop],nsig*np.rad2deg(np.sqrt(Patt[istart:istop,1])),'k')
att_ax[1,1].plot(flight_data.time[istart:istop],-nsig*np.rad2deg(np.sqrt(Patt[istart:istop,1])),'k')
att_ax[2,1].plot(flight_data.time[istart:istop],np.rad2deg(delta_att[istart:istop,2]),'r')
att_ax[2,1].plot(flight_data.time[istart:istop],nsig*np.rad2deg(np.sqrt(Patt[istart:istop,2])),'k')
att_ax[2,1].plot(flight_data.time[istart:istop],-nsig*np.rad2deg(np.sqrt(Patt[istart:istop,2])),'k')
att_ax[0,0].set_title('Euler Angle (deg)')
att_ax[0,1].set_title('Attitude Error (deg)')
att_ax[2,0].set_xlabel('Time (sec)')
att_ax[2,1].set_xlabel('Time (sec)')
ab_fig, ab_ax = plt.subplots(3, sharex=True)
try:
ab_ax[0].plot(flight_data.time[istart:istop],flight_data.ax_bias[istart:istop],label='True')
except AttributeError:
pass
ab_ax[0].plot(flight_data.time[istart:istop],estAB[istart:istop,0],label='Filter')
ab_ax[0].set_ylabel('$b_{ax}$ (m/s$^2$)')
ab_ax[0].set_title('Accelerometer Bias')
try:
ab_ax[1].plot(flight_data.time[istart:istop],flight_data.ay_bias[istart:istop],label='True')
except AttributeError:
pass
ab_ax[1].plot(flight_data.time[istart:istop],estAB[istart:istop,1],label='Filter')
ab_ax[1].set_ylabel('$b_{ay}$ (m/s$^2$)')
ab_ax[1].legend(loc='best')
try:
ab_ax[2].plot(flight_data.time[istart:istop],flight_data.az_bias[istart:istop],label='True')
except AttributeError:
pass
ab_ax[2].plot(flight_data.time[istart:istop],estAB[istart:istop,2],label='Filter')
ab_ax[2].set_ylabel('$b_{az}$ (m/s$^2$)')
ab_ax[2].set_xlabel('Time (sec)')
gb_fig, gb_ax = plt.subplots(3, sharex=True)
try:
gb_ax[0].plot(flight_data.time[istart:istop],np.rad2deg(flight_data.p_bias[istart:istop]),label='True')
except AttributeError:
pass
gb_ax[0].plot(flight_data.time[istart:istop],np.rad2deg(estGB[istart:istop,0]),label='Filter')
gb_ax[0].set_ylabel('$b_{gx}$ (deg/s)')
gb_ax[0].set_title('Gyro Bias')
try:
gb_ax[1].plot(flight_data.time[istart:istop],np.rad2deg(flight_data.q_bias[istart:istop]),label='True')
except AttributeError:
pass
gb_ax[1].plot(flight_data.time[istart:istop],np.rad2deg(estGB[istart:istop,1]),label='Filter')
gb_ax[1].set_ylabel('$b_{gy}$ (deg/s)')
gb_ax[1].legend(loc='best')
try:
gb_ax[2].plot(flight_data.time[istart:istop],np.rad2deg(flight_data.r_bias[istart:istop]),label='True')
except AttributeError:
pass
gb_ax[2].plot(flight_data.time[istart:istop],np.rad2deg(estGB[istart:istop,2]),label='Filter')
gb_ax[2].set_ylabel('$b_{gz}$ (deg/s)')
gb_ax[2].set_xlabel('Time (sec)')
plt.show()
| 39.179487 | 107 | 0.675112 |
b885e1f7ce772d5955ccf0e19011312e902048d0
| 323 |
py
|
Python
|
exercícios/EX_CursoEmVideo/ex018.py
|
jose-carlos-code/CursoEmvideo-python
|
8c9b82db2c2b906f6d8f2359a680b9b3af25da43
|
[
"MIT"
] | 1 |
2021-01-11T15:10:36.000Z
|
2021-01-11T15:10:36.000Z
|
exercícios/EX_CursoEmVideo/ex018.py
|
jose-carlos-code/CursoEmvideo-python
|
8c9b82db2c2b906f6d8f2359a680b9b3af25da43
|
[
"MIT"
] | null | null | null |
exercícios/EX_CursoEmVideo/ex018.py
|
jose-carlos-code/CursoEmvideo-python
|
8c9b82db2c2b906f6d8f2359a680b9b3af25da43
|
[
"MIT"
] | null | null | null |
import math
numero = float(input('enter the value of an angle: '))
s = math.sin(math.radians(numero))
c = math.cos(math.radians(numero))
t = math.tan(math.radians(numero))
print('the sine of {} is {:.2f}'.format(numero, s))
print('the cosine of {} is {:.2f}'.format(numero, c))
print('the tangent of {} is {:.2f}'.format(numero, t))
| 35.888889 | 54 | 0.662539 |
9f4e4a7f0e4441c49d34e3c5b1a7b4aaaa480985
| 67,420 |
py
|
Python
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
firojkabir/lsg
|
ff8b5edc02b2d45f6bc602c7a2aa592706009345
|
[
"MIT"
] | 8 |
2016-08-11T16:27:15.000Z
|
2021-08-10T06:20:09.000Z
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
firojkabir/lsg
|
ff8b5edc02b2d45f6bc602c7a2aa592706009345
|
[
"MIT"
] | 7 |
2020-03-10T07:47:34.000Z
|
2022-02-12T00:20:30.000Z
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
firojkabir/lsg
|
ff8b5edc02b2d45f6bc602c7a2aa592706009345
|
[
"MIT"
] | 6 |
2015-07-08T20:31:37.000Z
|
2022-03-18T01:33:27.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'true',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| 45.431267 | 82 | 0.550326 |
57adad65a20cd05dbd5a77e40b362810ffe56527
| 1,432 |
py
|
Python
|
README_demo.py
|
phy25/tweetpi
|
eec30a1ba861d5968b778d4f7056fb0b814bc699
|
[
"MIT"
] | 2 |
2019-03-08T03:05:07.000Z
|
2020-11-01T11:43:11.000Z
|
README_demo.py
|
phy25/tweetpi
|
eec30a1ba861d5968b778d4f7056fb0b814bc699
|
[
"MIT"
] | 22 |
2018-09-10T21:14:17.000Z
|
2018-12-01T06:51:12.000Z
|
README_demo.py
|
phy25/tweetpi
|
eec30a1ba861d5968b778d4f7056fb0b814bc699
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from TweetPI import TweetPI, video
import sys
# Change keys below or in `options.json` before you execute!
# To read options from a file, use
import json
with open("options.json", "r") as fp:
o = json.load(fp)
# To use the static options, use
# o = {"twitter_consumer_key":"...", "twitter_consumer_secret":"...", "twitter_access_token":"...", "twitter_access_secret":"...", "google_key_json":"gapi.json"}
tpi = TweetPI(o)
def main():
try:
# list
photolist = tpi.get_timeline(username="football", page=1, limit=50)
print(photolist)
# download
photolist.download_all(shell=True)
# annotate
photolist.get_annotations()
for t in photolist.photos:
print('{}: {}'.format(t.remote_url, ", ".join([a.description for a in t.annotation.label_annotations])))
# video
videopath = video.generate_video(photos=photolist, name='video1.mp4', size='1080x720', shell=True, interval=3)
print(videopath)
# annotated video
videopath2 = video.generate_annotated_video(photos=photolist, name='video2.mp4', size='1080x720', shell=True, font_color='rgb(255,0,0)', font_file='Roboto-Regular.ttf', interval=3, font_size=30)
print(videopath2)
except Exception as e:
# Error handling
print("ERROR: {}".format(e), file=sys.stderr)
sys.exit(2)
if __name__ == "__main__":
main()
| 38.702703 | 202 | 0.649441 |
793dab9a3babf8d8dea5f5191b9b3fa21911114e
| 1,017 |
py
|
Python
|
tests/experiment/dallinger_experiment.py
|
istresearch/Dallinger
|
47e4967ded9e01edbc8c1ae7132c9ec30a87f116
|
[
"MIT"
] | 1 |
2020-01-29T04:13:26.000Z
|
2020-01-29T04:13:26.000Z
|
tests/experiment/dallinger_experiment.py
|
jcpeterson/Dallinger
|
55bf00efddb19ab8b7201b65c461996793edf6f4
|
[
"MIT"
] | null | null | null |
tests/experiment/dallinger_experiment.py
|
jcpeterson/Dallinger
|
55bf00efddb19ab8b7201b65c461996793edf6f4
|
[
"MIT"
] | 1 |
2019-02-07T14:16:39.000Z
|
2019-02-07T14:16:39.000Z
|
from dallinger.config import get_config
from dallinger.experiment import Experiment
config = get_config()
class TestExperiment(Experiment):
_completed = None
def __init__(self, session=None):
try:
super(TestExperiment, self).__init__(session)
except TypeError:
self.practice_repeats = 0
self.verbose = True
if session:
self.session = session
self.configure()
self.experiment_repeats = 1
self.quorum = 1
if session:
self.setup()
@property
def public_properties(self):
return {
'exists': True,
}
def create_network(self):
"""Return a new network."""
from dallinger.networks import Star
return Star(max_size=2)
def is_complete(self):
return config.get('_is_completed', None)
def extra_parameters():
config.register('custom_parameter', int, [])
config.register('_is_completed', bool, [])
| 24.214286 | 57 | 0.60177 |
c5d781b2233f761719779e98344d428a52bf4fe4
| 12,982 |
py
|
Python
|
airbyte-integrations/connectors/source-amazon-ads/unit_tests/test_report_streams.py
|
harshithmullapudi/airbyte
|
c3c489ae3fc397580e598899bcc523cbaf4fdcd5
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-amazon-ads/unit_tests/test_report_streams.py
|
harshithmullapudi/airbyte
|
c3c489ae3fc397580e598899bcc523cbaf4fdcd5
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-amazon-ads/unit_tests/test_report_streams.py
|
harshithmullapudi/airbyte
|
c3c489ae3fc397580e598899bcc523cbaf4fdcd5
|
[
"MIT"
] | null | null | null |
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
from base64 import b64decode
from unittest import mock
import pytest
import responses
from airbyte_cdk.models import SyncMode
from freezegun import freeze_time
from pytest import raises
from requests.exceptions import ConnectionError
from source_amazon_ads.schemas.profile import AccountInfo, Profile
from source_amazon_ads.spec import AmazonAdsConfig
from source_amazon_ads.streams import (
    SponsoredBrandsReportStream,
    SponsoredDisplayReportStream,
    SponsoredProductsReportStream,
    SponsoredBrandsVideoReportStream,
)
from source_amazon_ads.streams.report_streams.report_streams import TooManyRequests
"""
METRIC_RESPONSE is gzip compressed binary representing this string:
[
{
"campaignId": 214078428,
"campaignName": "sample-campaign-name-214078428"
},
{
"campaignId": 44504582,
"campaignName": "sample-campaign-name-44504582"
},
{
"campaignId": 509144838,
"campaignName": "sample-campaign-name-509144838"
},
{
"campaignId": 231712082,
"campaignName": "sample-campaign-name-231712082"
},
{
"campaignId": 895306040,
"campaignName": "sample-campaign-name-895306040"
}
]
"""
METRIC_RESPONSE = b64decode(
"""
H4sIAAAAAAAAAIvmUlCoBmIFBaXkxNyCxMz0PM8UJSsFI0MTA3MLEyMLHVRJv8TcVKC0UjGQn5Oq
CxPWzQOK68I1KQE11ergMNrExNTAxNTCiBSTYXrwGmxqYGloYmJhTJKb4ZrwGm1kbGhuaGRAmqPh
mvAabWFpamxgZmBiQIrRcE1go7liAYX9dsTHAQAA
"""
)
METRICS_COUNT = 5
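# Illustrative helper (an assumption, not part of the original tests): a
# fixture like METRIC_RESPONSE can be rebuilt from a list of dicts by
# gzip-compressing the JSON text and base64-encoding the result, matching the
# encoding described in the docstring above.
def _gzip_b64_fixture(records):
    import gzip
    import json
    from base64 import b64encode
    return b64encode(gzip.compress(json.dumps(records).encode("utf-8")))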
def setup_responses(init_response=None, init_response_products=None, init_response_brands=None, status_response=None, metric_response=None):
if init_response:
responses.add(responses.POST, re.compile(r"https://advertising-api.amazon.com/sd/[a-zA-Z]+/report"), body=init_response, status=202)
if init_response_products:
responses.add(
responses.POST,
re.compile(r"https://advertising-api.amazon.com/v2/sp/[a-zA-Z]+/report"),
body=init_response_products,
status=202,
)
if init_response_brands:
responses.add(
responses.POST, re.compile(r"https://advertising-api.amazon.com/v2/hsa/[a-zA-Z]+/report"), body=init_response_brands, status=202
)
if status_response:
responses.add(
responses.GET,
re.compile(r"https://advertising-api.amazon.com/v2/reports/[^/]+$"),
body=status_response,
)
if metric_response:
responses.add(
responses.GET,
"https://advertising-api-test.amazon.com/v1/reports/amzn1.sdAPI.v1.m1.61022EEC.2ac27e60-665c-46b4-b5a9-d72f216cc8ca/download",
body=metric_response,
)
REPORT_INIT_RESPONSE = """
{"reportId":"amzn1.sdAPI.v1.m1.61022EEC.2ac27e60-665c-46b4-b5a9-d72f216cc8ca","recordType":"campaigns","status":"IN_PROGRESS","statusDetails":"Generating report"}
"""
REPORT_STATUS_RESPONSE = """
{"reportId":"amzn1.sdAPI.v1.m1.61022EEC.2ac27e60-665c-46b4-b5a9-d72f216cc8ca","status":"SUCCESS","statusDetails":"Report successfully generated","location":"https://advertising-api-test.amazon.com/v1/reports/amzn1.sdAPI.v1.m1.61022EEC.2ac27e60-665c-46b4-b5a9-d72f216cc8ca/download","fileSize":144}
"""
def make_profiles(profile_type="seller"):
return [
Profile(
profileId=1,
timezone="America/Los_Angeles",
accountInfo=AccountInfo(marketplaceStringId="", id="", type=profile_type),
)
]
@responses.activate
def test_display_report_stream(test_config):
setup_responses(
init_response=REPORT_INIT_RESPONSE,
status_response=REPORT_STATUS_RESPONSE,
metric_response=METRIC_RESPONSE,
)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
metrics = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
assert len(metrics) == METRICS_COUNT * len(stream.metrics_map)
updated_state = stream.get_updated_state(None, stream_slice)
assert updated_state == stream_slice
profiles = make_profiles(profile_type="vendor")
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
metrics = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
# Skip asins record for vendor profiles
assert len(metrics) == METRICS_COUNT * (len(stream.metrics_map) - 1)
@responses.activate
def test_products_report_stream(test_config):
setup_responses(
init_response_products=REPORT_INIT_RESPONSE,
status_response=REPORT_STATUS_RESPONSE,
metric_response=METRIC_RESPONSE,
)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles(profile_type="vendor")
stream = SponsoredProductsReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
metrics = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
assert len(metrics) == METRICS_COUNT * len(stream.metrics_map)
@responses.activate
def test_brands_report_stream(test_config):
setup_responses(
init_response_brands=REPORT_INIT_RESPONSE,
status_response=REPORT_STATUS_RESPONSE,
metric_response=METRIC_RESPONSE,
)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredBrandsReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
metrics = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
assert len(metrics) == METRICS_COUNT * len(stream.metrics_map)
@responses.activate
def test_brands_video_report_stream(test_config):
setup_responses(
init_response_brands=REPORT_INIT_RESPONSE,
status_response=REPORT_STATUS_RESPONSE,
metric_response=METRIC_RESPONSE,
)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredBrandsVideoReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
metrics = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
assert len(metrics) == METRICS_COUNT * len(stream.metrics_map)
@responses.activate
def test_display_report_stream_report_generation_failure(test_config):
setup_responses(
init_response=REPORT_INIT_RESPONSE,
status_response=REPORT_STATUS_RESPONSE.replace("SUCCESS", "FAILURE"),
metric_response=METRIC_RESPONSE,
)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
with pytest.raises(Exception):
_ = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
@responses.activate
def test_display_report_stream_init_failure(mocker, test_config):
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
responses.add(
responses.POST, re.compile(r"https://advertising-api.amazon.com/sd/[a-zA-Z]+/report"), json={"error": "some error"}, status=400
)
with pytest.raises(Exception):
[m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
@responses.activate
def test_display_report_stream_init_http_exception(mocker, test_config):
mocker.patch("time.sleep", lambda x: None)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
responses.add(responses.POST, re.compile(r"https://advertising-api.amazon.com/sd/[a-zA-Z]+/report"), body=ConnectionError())
with raises(ConnectionError):
_ = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
assert len(responses.calls) == 5
@responses.activate
def test_display_report_stream_init_too_many_requests(mocker, test_config):
mocker.patch("time.sleep", lambda x: None)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
responses.add(responses.POST, re.compile(r"https://advertising-api.amazon.com/sd/[a-zA-Z]+/report"), json={}, status=429)
with raises(TooManyRequests):
_ = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
assert len(responses.calls) == 5
@responses.activate
def test_display_report_stream_timeout(mocker, test_config):
time_mock = mock.MagicMock()
mocker.patch("time.sleep", time_mock)
setup_responses(init_response=REPORT_INIT_RESPONSE, metric_response=METRIC_RESPONSE)
with freeze_time("2021-07-30 04:26:08") as frozen_time:
success_cnt = 2
class StatusCallback:
count: int = 0
def __call__(self, request):
self.count += 1
response = REPORT_STATUS_RESPONSE
if self.count > success_cnt:
response = REPORT_STATUS_RESPONSE.replace("SUCCESS", "IN_PROGRESS")
if self.count > success_cnt + 1:
frozen_time.move_to("2021-07-30 06:26:08")
return (200, {}, response)
responses.add_callback(
responses.GET, re.compile(r"https://advertising-api.amazon.com/v2/reports/[^/]+$"), callback=StatusCallback()
)
config = AmazonAdsConfig(**test_config)
profiles = make_profiles()
stream = SponsoredDisplayReportStream(config, profiles, authenticator=mock.MagicMock())
stream_slice = {"reportDate": "20210725"}
with pytest.raises(Exception):
_ = [m for m in stream.read_records(SyncMode.incremental, stream_slice=stream_slice)]
time_mock.assert_called_with(30)
@freeze_time("2021-07-30 04:26:08")
@responses.activate
def test_display_report_stream_slices_full_refresh(test_config):
config = AmazonAdsConfig(**test_config)
stream = SponsoredDisplayReportStream(config, None, authenticator=mock.MagicMock())
slices = stream.stream_slices(SyncMode.full_refresh, cursor_field=stream.cursor_field)
assert slices == [{"reportDate": "20210730"}]
@freeze_time("2021-07-30 04:26:08")
@responses.activate
def test_display_report_stream_slices_incremental(test_config):
config = AmazonAdsConfig(**test_config)
stream = SponsoredDisplayReportStream(config, None, authenticator=mock.MagicMock())
stream_state = {"reportDate": "20210726"}
slices = stream.stream_slices(SyncMode.incremental, cursor_field=stream.cursor_field, stream_state=stream_state)
assert slices == [
{"reportDate": "20210727"},
{"reportDate": "20210728"},
{"reportDate": "20210729"},
{"reportDate": "20210730"},
]
stream_state = {"reportDate": "20210730"}
slices = stream.stream_slices(SyncMode.incremental, cursor_field=stream.cursor_field, stream_state=stream_state)
assert slices == [None]
stream_state = {"reportDate": "20210731"}
slices = stream.stream_slices(SyncMode.incremental, cursor_field=stream.cursor_field, stream_state=stream_state)
assert slices == [None]
slices = stream.stream_slices(SyncMode.incremental, cursor_field=stream.cursor_field, stream_state={})
assert slices == [{"reportDate": "20210730"}]
slices = stream.stream_slices(SyncMode.incremental, cursor_field=None, stream_state={})
assert slices == [{"reportDate": "20210730"}]
| 39.944615 | 297 | 0.730627 |
862a27ccede05995dccd4603adc9ef1f9b209922
| 461 |
py
|
Python
|
plotly/validators/area/stream/_token.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2 |
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/area/stream/_token.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/area/stream/_token.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4 |
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='token', parent_name='area.stream', **kwargs
):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
no_blank=True,
role='info',
strict=True,
**kwargs
)
| 25.611111 | 70 | 0.598698 |
691187d1932a01ee94d3532bbdeb85d968756173
| 1,632 |
py
|
Python
|
gs/content/favicon/tests/icon.py
|
groupserver/gs.content.favicon
|
9b54d44c1c14e82fce77ff2303ade8f3ea500555
|
[
"ZPL-2.1"
] | null | null | null |
gs/content/favicon/tests/icon.py
|
groupserver/gs.content.favicon
|
9b54d44c1c14e82fce77ff2303ade8f3ea500555
|
[
"ZPL-2.1"
] | null | null | null |
gs/content/favicon/tests/icon.py
|
groupserver/gs.content.favicon
|
9b54d44c1c14e82fce77ff2303ade8f3ea500555
|
[
"ZPL-2.1"
] | null | null | null |
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2016 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals, print_function
from mock import (patch, PropertyMock, MagicMock, )
from unittest import TestCase
from gs.content.favicon.icon import Icon
class TestIcon(TestCase):
'Test the ``Icon`` class'
notImage = b'This is not an image.'
@patch.object(Icon, 'image', new_callable=PropertyMock)
def test_call(self, m_image):
m_image.return_value = self.notImage
request = MagicMock()
i = Icon(MagicMock(), request)
r = i()
self.assertEqual(self.notImage, r)
request.RESPONSE.setHeader.assert_any_call(b'Content-Length', b'21')
@patch('gs.content.favicon.icon.get_data')
def test_image(self, m_get_data):
m_get_data.return_value = self.notImage
i = Icon(MagicMock(), MagicMock())
r = i.image
self.assertEqual(self.notImage, r)
m_get_data.assert_called_once_with('gs.content.favicon', 'browser/images/favicon.ico')
| 37.090909 | 94 | 0.64277 |
75af9ddf9ca18b38bc3b878d52c0d4ee02cb1b6b
| 4,174 |
py
|
Python
|
final/one_time/emotion.py
|
Fabhi/sentiment-analysis
|
addf82c40ef1999241edbc80df164db908d56b57
|
[
"MIT"
] | 1 |
2021-05-22T18:48:14.000Z
|
2021-05-22T18:48:14.000Z
|
final/one_time/emotion.py
|
Fabhi/sentiment-analysis
|
addf82c40ef1999241edbc80df164db908d56b57
|
[
"MIT"
] | null | null | null |
final/one_time/emotion.py
|
Fabhi/sentiment-analysis
|
addf82c40ef1999241edbc80df164db908d56b57
|
[
"MIT"
] | null | null | null |
import re
from emotion_dict import getEmotionDict
patterns = []
adverbs = frozenset(["not", "also", "very", "often", "however", "too", "usually", "really", "early", "never", "always", "sometimes", "together", "likely", "simply", "generally", "instead", "actually", "again", "rather", "almost", "especially", "ever", "quickly", "probably", "already", "below", "directly", "therefore", "else", "thus", "easily", "eventually", "exactly", "certainly", "normally", "currently", "extremely", "finally", "constantly", "properly", "soon", "specifically", "ahead", "daily", "highly", "immediately", "relatively", "slowly", "fairly", "primarily", "completely", "ultimately", "widely", "recently", "seriously", "frequently", "fully", "mostly", "naturally", "nearly", "occasionally", "carefully", "clearly", "essentially", "possibly", "slightly", "somewhat", "equally", "greatly", "necessarily", "personally", "rarely", "regularly", "similarly", "basically", "closely", "effectively", "initially", "literally", "mainly", "merely", "gently", "hopefully", "originally", "roughly", "significantly", "totally", "twice", "elsewhere", "everywhere", "obviously", "perfectly", "physically", "successfully", "suddenly", "truly", "virtually", "altogether", "anyway", "automatically", "deeply", "definitely", "deliberately", "hardly", "readily", "terribly", "unfortunately", "forth", "briefly", "moreover", "strongly", "honestly", "previously", "as", "there", "when", "how", "so", "up", "out"])
degreeTuple = ["Positive", "Negative","Anger" ,"Anticipation", "Disgust" ,"Fear" ,"Joy", "Sadness" ,"Surprise", "Trust"]
allAfter = r"\s+(.[^.,_-]*)"
justAfter = r"\s+(\w+(?= )*)"
query = "I am feeling"
query2 = "I feel"
d = getEmotionDict()
def extractNext(text, queryText = query):
res = []
matches = re.findall(queryText+justAfter, text)
# print(text, matches)
if len(matches) == 0: return None
else:
for match in matches:
word = match
end = re.search(queryText+justAfter, text).end()
start = end - len(match)
res.append({"match" : word, "start" : start, "end":end, "text": text, "emotion": None, "degree_tuple" : None, "validity" : False})
return res
def isEmotion(c):
candidates = [c, c+"ness" ,c[:-1]+"ness" , c+"ment", c+"ation", c+"tion", c+"sion"]
if(len(c)>3):
candidates.append(c[:-3]+"ence")
candidates.append(c[:-2]+"ness")
for i in candidates:
val = d.get(i, None)
if val:
# print(i)
return val
return False
def emotionSearch(results):
for result in results:
candidate = result["match"].lower()
a = isEmotion(candidate)
if a:
result["emotion"] = candidate
result["degree_tuple"] = dict(zip(degreeTuple, a))
result["validity"] = any(a)
else:
if candidate in adverbs:
next = extractNext(result["text"], candidate)
if next is None : continue
nextCandidate = next[0]["match"].lower()
a = isEmotion(nextCandidate)
if a:
result["emotion"] = nextCandidate
result["degree_tuple"] = dict(zip(degreeTuple, a))
result["validity"] = any(a)
def preprocess(txt):
return txt.replace("I’m", "I am").replace("I'm", "I am")
samples= [
"Got my hair chopped off and I am feeling myself! (Before pic included)",
"Was called un-dateable due to being a virgin and I am feeling very hurt and confused.",
"I am feeling a shitload of anxiety about Natasha Helfer's disciplinary council, and I just figured out why.",
"Today is day three of not drinking, and I’m feeling very sad and depressed. I’ve been crying and very down. I can’t think of very many positive things. I don’t necessarily want a drink, but I’m just feeling sad. Is this normal? Any suggestions to get out of this funk"
]
def ProcessEmotions(data):
res = []
cleaned_text = preprocess(data)
a = extractNext(cleaned_text)
if a is None: return None
res.extend(a)
emotionSearch(res)
return res
# print(ProcessEmotions(samples))
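# Illustrative usage sketch (an assumption, not part of the original script):
# ProcessEmotions expects a single string (preprocess calls str.replace), so
# iterate over the samples above rather than passing the whole list.
# for s in samples:
#     print(ProcessEmotions(s))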
| 54.207792 | 1,407 | 0.614998 |
a0bc6d2d2750a6e3cd59d8a87c260f794c0a6eb8
| 1,162 |
py
|
Python
|
src/probnum/linalg/solvers/policies/_linear_solver_policy.py
|
treid5/probnum
|
fabb51243d0952fbd35e542aeb5c2dc9a449ec81
|
[
"MIT"
] | 1 |
2021-06-22T14:25:43.000Z
|
2021-06-22T14:25:43.000Z
|
src/probnum/linalg/solvers/policies/_linear_solver_policy.py
|
pitmonticone/probnum
|
1fed705b2443a14d08419e16f98f6ef815ae9ffa
|
[
"MIT"
] | 42 |
2021-03-08T07:20:40.000Z
|
2022-03-28T05:04:48.000Z
|
src/probnum/linalg/solvers/policies/_linear_solver_policy.py
|
pitmonticone/probnum
|
1fed705b2443a14d08419e16f98f6ef815ae9ffa
|
[
"MIT"
] | null | null | null |
"""Base class for policies of probabilistic linear solvers returning actions."""
import abc
import numpy as np
import probnum # pylint: disable="unused-import"
class LinearSolverPolicy(abc.ABC):
r"""Policy of a (probabilistic) linear solver.
The policy :math:`\pi(s \mid \mathsf{A}, \mathsf{H}, \mathsf{x}, A, b)` of a
linear solver returns a vector to probe the linear system with, typically via
multiplication, resulting in an observation. Policies can either be deterministic or
stochastic depending on the application.
See Also
--------
ConjugateDirectionsPolicy : Policy returning :math:`A`-conjugate actions.
RandomUnitVectorPolicy : Policy returning random standard unit vectors.
"""
@abc.abstractmethod
def __call__(
self, solver_state: "probnum.linalg.solvers.LinearSolverState"
) -> np.ndarray:
"""Return an action for a given solver state.
Parameters
----------
solver_state
Current state of the linear solver.
Returns
-------
action
Next action to take.
"""
raise NotImplementedError
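# Illustrative sketch (an assumption, not part of probnum): a minimal concrete
# policy that probes with random standard unit vectors. It assumes the solver
# state exposes the system matrix as ``solver_state.problem.A``; treat that
# attribute access, and the class itself, as hypothetical.
class _RandomUnitVectorPolicySketch(LinearSolverPolicy):
    """Toy policy returning a random standard unit vector each iteration."""
    def __call__(self, solver_state) -> np.ndarray:
        n = solver_state.problem.A.shape[1]
        action = np.zeros(n)
        action[np.random.default_rng().integers(n)] = 1.0
        return action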
| 29.05 | 88 | 0.657487 |
4d0cde26455d1e37160c3381aedeaa565118f545
| 7,448 |
py
|
Python
|
src/semantic_parsing_with_constrained_lm/domains/calflow/disambiguate.py
|
microsoft/semantic_parsing_with_constrained_lm
|
7e3c099500c3102e46d7a47469fe6840580c2b11
|
[
"MIT"
] | 17 |
2021-09-22T13:08:37.000Z
|
2022-03-27T10:39:53.000Z
|
src/semantic_parsing_with_constrained_lm/domains/calflow/disambiguate.py
|
microsoft/semantic_parsing_with_constrained_lm
|
7e3c099500c3102e46d7a47469fe6840580c2b11
|
[
"MIT"
] | 1 |
2022-03-12T01:05:15.000Z
|
2022-03-12T01:05:15.000Z
|
src/semantic_parsing_with_constrained_lm/domains/calflow/disambiguate.py
|
microsoft/semantic_parsing_with_constrained_lm
|
7e3c099500c3102e46d7a47469fe6840580c2b11
|
[
"MIT"
] | 1 |
2021-12-16T22:26:54.000Z
|
2021-12-16T22:26:54.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
from dataflow.core.lispress import Lispress, parse_lispress
from semantic_parsing_with_constrained_lm.util.util import flatten
def no_alwaystrue_under_getintrasalient(f: Lispress):
"""
GetIntraSalient should always be called with an AlwaysTrueConstraint
"""
if isinstance(f, str) or len(f) == 0:
return False
if f[0] == "getIntraSalient":
return "AlwaysTrueConstraint" not in f[1][0]
return any(no_alwaystrue_under_getintrasalient(x) for x in f)
def weird_do(f: Lispress):
"""
The arguments to Do should be Yield expressions.
"""
if isinstance(f, str) or len(f) == 0:
return False
if f[0] == "do":
if len(f) < 2:
return True
return not f[1][0] == "Yield"
return any(weird_do(x) for x in f)
def weird_yield(f: Lispress):
"""Expressions called by Yield should begin with an alphabetic character."""
return f[0] == "Yield" and not f[2][0][0].isalpha()
def is_banned(lispress_str: str) -> bool:
"""
Check if the lispress_str contains any banned patterns.
"""
try:
form = parse_lispress(lispress_str)
except AssertionError:
return True
has_double_yield = "(Yield :output (Yield" in lispress_str
preflight_no_create = lispress_str.count(
"CreatePreflightEventWrapper"
) - lispress_str.count(
"(CreateCommitEventWrapper :event (CreatePreflightEventWrapper"
)
no_yield = (
"Yield" not in lispress_str
and "Fence" not in lispress_str
and "Pleasantry" not in lispress_str
and "UserPauseResponse" not in lispress_str
and "RepeatAgent" not in lispress_str
and "DoNotConfirm" not in lispress_str
)
place_multi_results_and_weather = (
"FindPlaceMultiResults" in lispress_str and "Weather" in lispress_str
)
place_not_weather = (
"FindPlace" in lispress_str
and not "FindPlaceMultiResults" in lispress_str
and "Weather" not in lispress_str
)
return (
has_double_yield
or no_yield
or weird_do(form)
or place_multi_results_and_weather
or place_not_weather
or no_alwaystrue_under_getintrasalient(form)
or preflight_no_create > 0
)
def has_findperson_under_create(f: Lispress) -> bool:
"""CreateAddPerson needs to be used in create/clobber situations"""
if isinstance(f, str) or len(f) == 0:
return False
if f[0] == "CreatePreflightEventWrapper":
return "FindAddPerson" in flatten(f)
if f[:4] == [
"ClobberWrapper",
":oldLocation",
["Constraint[Constraint[Event]]"],
":new",
]:
return "FindAddPerson" in flatten(f[4:])
return any(has_findperson_under_create(x) for x in f)
def has_noncanonical_andconstraint(f: Lispress) -> bool:
"""andConstraints should be left-branching"""
if isinstance(f, str) or len(f) == 0:
return False
if f[0] == "andConstraint":
if len(f) < 3:
return True
for child in f[2:]:
if (
isinstance(child, list)
and len(child) > 0
and child[0] == "andConstraint"
):
# andConstraint can only be the first child
return True
return any(has_noncanonical_andconstraint(x) for x in f)
def score_auto_grammar_plan(lispress_str: str) -> float:
"""
Assigns a rule-based score to a Lispress str, with higher scores being preferred.
We use this to disambiguate when a canonical utterance has multiple possible parses.
"""
mega_functions = [
"FindNumNextEventWrapper",
"PathAndTypeConstraint",
"ResponseWrapper",
"ClobberWrapper",
"NewClobberWrapper",
"ChooseCreateEventWrapper",
"ChooseUpdateEventWrapper",
"ChooseCreateEventFromConstraintWrapper",
"ChooseUpdateEventFromConstraintWrapper",
"ChoosePersonFromConstraintWrapper",
"CreateAddPerson",
"FindAddPerson",
"UpdateWrapper",
"DeleteWrapper",
"NumberOrdinal",
]
if is_banned(lispress_str):
return -float("inf")
form = parse_lispress(lispress_str)
# dispreferred
num_chars = len(lispress_str)
num_parens = lispress_str.count("(")
always_true = lispress_str.count("AlwaysTrueConstraint") # prefer `Constraint`
yield_execute = lispress_str.count("(Yield :output (Execute")
bare_preflight_wrapper = lispress_str.count(":output (UpdatePreflightEventWrapper")
bad_output_equals = lispress_str.count(":output (?=")
bad_output_id = lispress_str.count(":output (:id")
bad_output_yield = lispress_str.count(":output (Yield")
# preferred
singletons = lispress_str.count(
"(singleton (:results (FindEventWrapperWithDefaults"
)
# sometimes bad
singleton_as_arg_to_describe = lispress_str.count(
":output (singleton (:results (FindEventWrapperWithDefaults"
)
event_on_date_output = (
lispress_str.count(":output (EventOn")
+ lispress_str.count(":output (EventAfter")
+ lispress_str.count(":output (EventBefore")
+ lispress_str.count(":output (EventDuring")
)
# For creates and updates, we use `do the Recipient "John"` (AttendeeListHasRecipient), but for queries
    # we need `any Recipient "John"` (FindAddPerson)
attendee_create_or_update = (
lispress_str.count("CreateCommitEventWrapper")
+ lispress_str.count("UpdateWrapper")
) * lispress_str.count("FindAddPerson")
num_mega_functions = sum(
lispress_str.count(mega_function) for mega_function in mega_functions
)
top_level_without_as_person = lispress_str.count(":output (CreateAddPerson")
top_level_person_name_like = lispress_str.count(":output (PersonWithNameLike")
event_date_time_separate = lispress_str.count("(EventAtTime :event (Event")
empty_constraint_match = re.search(
"(?<!extensionConstraint )\\(Constraint\\[([A-Za-z]+)\\]\\)", lispress_str
)
bad_empty_constraint = (
1
if empty_constraint_match
and empty_constraint_match.group(1) != "Event"
and empty_constraint_match.group(1) != "Recipient"
else 0
)
bad_recip = lispress_str.count(
"RecipientWithNameLike :constraint (Constraint[Recipient])"
)
bad_year = (
1 if re.search(":year #\\(Number [0-9]{1,3}[\\.\\)]", lispress_str) else 0
)
return (
-1000.0 * has_findperson_under_create(form)
+ -1000.0 * has_noncanonical_andconstraint(form)
+ -30.0 * always_true
+ -30.0 * attendee_create_or_update
+ -100.0
* (
bare_preflight_wrapper
+ bad_output_equals
+ bad_output_id
+ bad_output_yield
+ bad_recip
)
+ -1000.0 * bad_year
+ -50.0 * singleton_as_arg_to_describe
+ -30.0 * (event_date_time_separate + event_on_date_output)
+ -1000.0 * bad_empty_constraint
+ -20.0 * yield_execute
+ -100.0 * (top_level_without_as_person + top_level_person_name_like)
+ -20.0 * weird_yield(form)
+ -5.0 * num_parens
+ -0.1 * num_chars
+ 30.0 * singletons
+ 20.0 * num_mega_functions
)
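# Illustrative usage sketch (an assumption, not part of the original module):
# when a canonical utterance yields several candidate Lispress strings, the
# preferred parse can be selected as the highest-scoring one, e.g.
#     best_plan = max(candidate_lispress_strs, key=score_auto_grammar_plan)
# where `candidate_lispress_strs` is a hypothetical list of parse strings.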
| 32.810573 | 107 | 0.643126 |
4f5f8f6aae0cab1090fa7950389ac644657ad7d9
| 4,789 |
py
|
Python
|
python/cugraph/link_analysis/pagerank.py
|
cfld/cugraph
|
085922880478779597aa0c4c7e0a117e3e4a7515
|
[
"Apache-2.0"
] | null | null | null |
python/cugraph/link_analysis/pagerank.py
|
cfld/cugraph
|
085922880478779597aa0c4c7e0a117e3e4a7515
|
[
"Apache-2.0"
] | null | null | null |
python/cugraph/link_analysis/pagerank.py
|
cfld/cugraph
|
085922880478779597aa0c4c7e0a117e3e4a7515
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.link_analysis import pagerank_wrapper
from cugraph.structure.graph_classes import null_check
import cugraph
def pagerank(
G, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-5, nstart=None,
weight=None, dangling=None
):
"""
Find the PageRank score for every vertex in a graph. cuGraph computes an
approximation of the Pagerank eigenvector using the power method. The
number of iterations depends on the properties of the network itself; it
    increases when the tolerance decreases and/or alpha increases toward the
limiting value of 1. The user is free to use default values or to provide
inputs for the initial guess, tolerance and maximum number of iterations.
Parameters
----------
    G : cugraph.Graph or networkx.Graph
cuGraph graph descriptor, should contain the connectivity information
as an edge list.
The transposed adjacency list will be computed if not already present.
alpha : float
The damping factor alpha represents the probability to follow an
outgoing edge, standard value is 0.85.
Thus, 1.0-alpha is the probability to “teleport” to a random vertex.
Alpha should be greater than 0.0 and strictly lower than 1.0.
personalization : cudf.Dataframe
GPU Dataframe containing the personalization information.
personalization['vertex'] : cudf.Series
Subset of vertices of graph for personalization
personalization['values'] : cudf.Series
Personalization values for vertices
max_iter : int
The maximum number of iterations before an answer is returned. This can
be used to limit the execution time and do an early exit before the
solver reaches the convergence tolerance.
If this value is lower or equal to 0 cuGraph will use the default
value, which is 100.
    tol : float
        Set the tolerance of the approximation; this parameter should be a
        small magnitude value.
The lower the tolerance the better the approximation. If this value is
0.0f, cuGraph will use the default value which is 1.0E-5.
Setting too small a tolerance can lead to non-convergence due to
numerical roundoff. Usually values between 0.01 and 0.00001 are
acceptable.
nstart : cudf.Dataframe
GPU Dataframe containing the initial guess for pagerank.
nstart['vertex'] : cudf.Series
Subset of vertices of graph for initial guess for pagerank values
nstart['values'] : cudf.Series
Pagerank values for vertices
dangling : dict
This parameter is here for NetworkX compatibility and ignored
Returns
-------
PageRank : cudf.DataFrame
GPU data frame containing two cudf.Series of size V: the vertex
identifiers and the corresponding PageRank values.
df['vertex'] : cudf.Series
Contains the vertex identifiers
df['pagerank'] : cudf.Series
Contains the PageRank score
Examples
--------
>>> gdf = cudf.read_csv('datasets/karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(gdf, source='0', destination='1')
>>> pr = cugraph.pagerank(G, alpha = 0.85, max_iter = 500, tol = 1.0e-05)
"""
G, isNx = cugraph.utilities.check_nx_graph(G, weight)
if personalization is not None:
null_check(personalization["vertex"])
null_check(personalization["values"])
if G.renumbered is True:
personalization = G.add_internal_vertex_id(
personalization, "vertex", "vertex"
)
if nstart is not None:
if G.renumbered is True:
nstart = G.add_internal_vertex_id(
nstart, "vertex", "vertex"
)
df = pagerank_wrapper.pagerank(
G, alpha, personalization, max_iter, tol, nstart
)
if G.renumbered:
df = G.unrenumber(df, "vertex")
if isNx is True:
return cugraph.utilities.df_score_to_dictionary(df, 'pagerank')
else:
return df
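# Illustrative follow-up to the docstring example (an assumption, not part of
# the original module): the returned cudf.DataFrame can be ranked by score,
# e.g.
#     top = pr.sort_values('pagerank', ascending=False).head(10)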
| 39.578512 | 79 | 0.676968 |
1ef324ffe446d4d75c22c3fe84319cf3ee9bff77
| 946 |
py
|
Python
|
Exercicios/semaforo.py
|
beatrizflorenccio/Projects-Python
|
fc584167a2816dc89f22baef0fa0f780af796c98
|
[
"MIT"
] | 1 |
2021-10-10T08:18:45.000Z
|
2021-10-10T08:18:45.000Z
|
Exercicios/semaforo.py
|
beatrizflorenccio/Projects-Python
|
fc584167a2816dc89f22baef0fa0f780af796c98
|
[
"MIT"
] | null | null | null |
Exercicios/semaforo.py
|
beatrizflorenccio/Projects-Python
|
fc584167a2816dc89f22baef0fa0f780af796c98
|
[
"MIT"
] | null | null | null |
#MaBe
import time
print()
print("=" * 30)
print("\33[1;38;48mSEMÁFORO\33[m".center(43))
print("=" * 30)
t1 = int(input("Tempo para o vermelho: "))
t2 = int(input("Tempo para o amarelo: "))
t3 = int(input("Tempo para o verde: "))
print()
print("INICIANDO SISTEMA...")
time.sleep(1)
# Red (Vermelho)
print()
print("\33[1; 31mVermelho: LIGADO\33[m")
print("\33[1mAmarelo: DESLIGADO\33[m")
print("\33[1mVerde: DESLIGADO\33[m")
for x in range(t1, -1, -1):
print(">>>Tempo: {}s".format(x))
time.sleep(1)
# Yellow (Amarelo)
print()
print("\33[1mVermelho: DESLIGADO\33[m")
print("\33[1; 33mAmarelo: LIGADO\33[m")
print("\33[1mVerde: DESLIGADO\33[m")
for y in range(t2, -1, -1):
print(">>>Tempo: {}s".format(y))
time.sleep(1)
# Green (Verde)
print()
print("\33[1mVermelho: DESLIGADO\33[m")
print("\33[1mAmarelo: DESLIGADO\33[m")
print("\33[1; 32mVerde: LIGADO\33[m")
for z in range(t3, -1, -1):
print(">>>Tempo: {}s".format(z))
time.sleep(1)
print()
| 21.022222 | 45 | 0.630021 |
07099b8c34ca13c1e41008c4abc2e12fdb4bcd16
| 6,134 |
py
|
Python
|
optimization.py
|
baobaozhou/BertClassfiy
|
5c994636ee63164752515a082c8d8a373bea3dcd
|
[
"Apache-2.0"
] | null | null | null |
optimization.py
|
baobaozhou/BertClassfiy
|
5c994636ee63164752515a082c8d8a373bea3dcd
|
[
"Apache-2.0"
] | null | null | null |
optimization.py
|
baobaozhou/BertClassfiy
|
5c994636ee63164752515a082c8d8a373bea3dcd
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.compat.v1.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
tvars = tf.compat.v1.trainable_variables()
grads = tf.gradients(ys=loss, xs=tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.compat.v1.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.compat.v1.zeros_initializer())
v = tf.compat.v1.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.compat.v1.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
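# For contrast with adding an L2 term to the loss, the decoupled weight-decay step
# implemented in apply_gradients above reduces to (variable names illustrative):
#
#   update = m_hat / (sqrt(v_hat) + eps)      # pure Adam direction, no decay inside
#   update += weight_decay_rate * param       # decay applied outside the m/v statistics
#   param -= learning_rate * update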
| 35.662791 | 80 | 0.687154 |
fc0244284b072d9d6466a98f1e2a45171b2b44b8
| 12,174 |
py
|
Python
|
test/test_dataset.py
|
joannetruong/habitat-api
|
aad2fd7b8545dce44daefd4b7b3941672eb96ee3
|
[
"MIT"
] | 355 |
2020-08-18T03:48:26.000Z
|
2022-03-30T00:22:50.000Z
|
test/test_dataset.py
|
joannetruong/habitat-api
|
aad2fd7b8545dce44daefd4b7b3941672eb96ee3
|
[
"MIT"
] | 328 |
2020-08-12T21:25:09.000Z
|
2022-03-31T10:39:21.000Z
|
test/test_dataset.py
|
joannetruong/habitat-api
|
aad2fd7b8545dce44daefd4b7b3941672eb96ee3
|
[
"MIT"
] | 159 |
2020-08-12T22:23:36.000Z
|
2022-03-30T22:56:52.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import groupby, islice
import pytest
from habitat.core.dataset import Dataset, Episode
def _construct_dataset(num_episodes, num_groups=10):
episodes = []
for i in range(num_episodes):
episode = Episode(
episode_id=str(i),
scene_id="scene_id_" + str(i % num_groups),
start_position=[0, 0, 0],
start_rotation=[0, 0, 0, 1],
)
episodes.append(episode)
dataset = Dataset()
dataset.episodes = episodes
return dataset
def test_scene_ids():
dataset = _construct_dataset(100)
assert dataset.scene_ids == ["scene_id_" + str(ii) for ii in range(10)]
def test_get_scene_episodes():
dataset = _construct_dataset(100)
scene = "scene_id_0"
scene_episodes = dataset.get_scene_episodes(scene)
assert len(scene_episodes) == 10
for ep in scene_episodes:
assert ep.scene_id == scene
def test_filter_episodes():
dataset = _construct_dataset(100)
def filter_fn(episode: Episode) -> bool:
return int(episode.episode_id) % 2 == 0
filtered_dataset = dataset.filter_episodes(filter_fn)
assert len(filtered_dataset.episodes) == 50
for ep in filtered_dataset.episodes:
assert filter_fn(ep)
def test_get_splits_even_split_possible():
dataset = _construct_dataset(100)
splits = dataset.get_splits(10)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 10
def test_get_splits_with_remainder():
dataset = _construct_dataset(100)
splits = dataset.get_splits(11)
assert len(splits) == 11
for split in splits:
assert len(split.episodes) == 9
def test_get_splits_num_episodes_specified():
dataset = _construct_dataset(100)
splits = dataset.get_splits(10, 3, False)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 3
assert len(dataset.episodes) == 100
dataset = _construct_dataset(100)
splits = dataset.get_splits(10, 10)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 10
assert len(dataset.episodes) == 100
dataset = _construct_dataset(100)
splits = dataset.get_splits(10, 3, True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 3
assert len(dataset.episodes) == 30
dataset = _construct_dataset(100)
with pytest.raises(ValueError):
splits = dataset.get_splits(10, 20)
def test_get_splits_collate_scenes():
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, 23, collate_scene_ids=True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 23
prev_ids = set()
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
assert split.episodes[ii - 1].scene_id == ep.scene_id
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, 200, collate_scene_ids=False)
assert len(splits) == 10
for split in splits:
prev_ids = set()
found_not_collated = False
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
if split.episodes[ii - 1].scene_id != ep.scene_id:
found_not_collated = True
break
assert found_not_collated
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, collate_scene_ids=True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 1000
prev_ids = set()
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
assert split.episodes[ii - 1].scene_id == ep.scene_id
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, collate_scene_ids=False)
assert len(splits) == 10
for split in splits:
prev_ids = set()
found_not_collated = False
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
if split.episodes[ii - 1].scene_id != ep.scene_id:
found_not_collated = True
break
assert found_not_collated
def test_get_splits_sort_by_episode_id():
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, 23, sort_by_episode_id=True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 23
for ii, ep in enumerate(split.episodes):
if ii > 0:
assert ep.episode_id >= split.episodes[ii - 1].episode_id
@pytest.mark.parametrize(
"num_episodes,num_splits",
[(994, 64), (1023, 64), (1024, 64), (1025, 64), (10000, 9), (10000, 10)],
)
def test_get_splits_func(num_episodes: int, num_splits: int):
dataset = _construct_dataset(num_episodes)
splits = dataset.get_splits(num_splits, allow_uneven_splits=True)
assert len(splits) == num_splits
assert sum(len(split.episodes) for split in splits) == num_episodes
splits = dataset.get_splits(num_splits, allow_uneven_splits=False)
assert len(splits) == num_splits
assert (
sum(map(lambda s: s.num_episodes, splits))
== (num_episodes // num_splits) * num_splits
)
def test_sample_episodes():
dataset = _construct_dataset(1000)
ep_iter = dataset.get_episode_iterator(
num_episode_sample=1000, cycle=False
)
assert len(list(ep_iter)) == 1000
ep_iter = dataset.get_episode_iterator(num_episode_sample=0, cycle=False)
assert len(list(ep_iter)) == 0
with pytest.raises(ValueError):
dataset.get_episode_iterator(num_episode_sample=1001, cycle=False)
ep_iter = dataset.get_episode_iterator(num_episode_sample=100, cycle=True)
ep_id_list = [e.episode_id for e in list(islice(ep_iter, 100))]
assert len(set(ep_id_list)) == 100
next_episode = next(ep_iter)
assert next_episode.episode_id in ep_id_list
ep_iter = dataset.get_episode_iterator(num_episode_sample=0, cycle=False)
with pytest.raises(StopIteration):
next(ep_iter)
def test_iterator_cycle():
dataset = _construct_dataset(100)
ep_iter = dataset.get_episode_iterator(
cycle=True, shuffle=False, group_by_scene=False
)
for i in range(200):
episode = next(ep_iter)
assert episode.episode_id == dataset.episodes[i % 100].episode_id
ep_iter = dataset.get_episode_iterator(cycle=True, num_episode_sample=20)
episodes = list(islice(ep_iter, 20))
for i in range(200):
episode = next(ep_iter)
assert episode.episode_id == episodes[i % 20].episode_id
def test_iterator_shuffle():
dataset = _construct_dataset(100)
episode_iter = dataset.get_episode_iterator(shuffle=True)
first_round_episodes = list(islice(episode_iter, 100))
second_round_episodes = list(islice(episode_iter, 100))
# both rounds should have same episodes but in different order
assert sorted(first_round_episodes) == sorted(second_round_episodes)
assert first_round_episodes != second_round_episodes
# both rounds should be grouped by scenes
first_round_scene_groups = [
k for k, g in groupby(first_round_episodes, key=lambda x: x.scene_id)
]
second_round_scene_groups = [
k for k, g in groupby(second_round_episodes, key=lambda x: x.scene_id)
]
assert len(first_round_scene_groups) == len(second_round_scene_groups)
assert len(first_round_scene_groups) == len(set(first_round_scene_groups))
def test_iterator_scene_switching_episodes():
total_ep = 1000
max_repeat = 25
dataset = _construct_dataset(total_ep)
episode_iter = dataset.get_episode_iterator(
max_scene_repeat_episodes=max_repeat, shuffle=False, cycle=True
)
episodes = sorted(dataset.episodes, key=lambda x: x.scene_id)
for _ in range(max_repeat):
episode = next(episode_iter)
assert (
episode.episode_id == episodes.pop(0).episode_id
), "episodes before max_repeat reached should be identical"
episode = next(episode_iter)
assert (
episode.scene_id != episodes.pop(0).scene_id
), "After max_repeat episodes a scene switch doesn't happen."
remaining_episodes = list(islice(episode_iter, total_ep - max_repeat - 1))
assert len(remaining_episodes) == len(
episodes
), "Remaining episodes should be identical."
assert len({e.scene_id for e in remaining_episodes}) == len(
set(map(lambda ep: ep.scene_id, remaining_episodes))
), "Next episodes should still include all scenes."
cycled_episodes = list(islice(episode_iter, 4 * total_ep))
assert (
len(set(map(lambda x: x.episode_id, cycled_episodes))) == total_ep
), "Some episodes leaked after cycling."
grouped_episodes = [
list(g) for k, g in groupby(cycled_episodes, key=lambda x: x.scene_id)
]
assert (
len(sum(grouped_episodes, [])) == 4 * total_ep
), "Cycled episode iterator returned unexpected number of episodes."
assert (
len(grouped_episodes) == 4 * total_ep / max_repeat
), "The number of scene switches is unexpected."
assert all(
len(group) == max_repeat for group in grouped_episodes
), "Not all scene switches are equal to required number."
def test_iterator_scene_switching_episodes_without_shuffle_cycle():
total_ep = 1000
max_repeat = 25
dataset = _construct_dataset(total_ep)
episode_iter = dataset.get_episode_iterator(
max_scene_repeat_episodes=max_repeat, shuffle=False, cycle=False
)
grouped_episodes = [
list(g) for k, g in groupby(episode_iter, key=lambda x: x.scene_id)
]
assert (
len(sum(grouped_episodes, [])) == total_ep
), "The episode iterator returned unexpected number of episodes."
assert (
len(grouped_episodes) == total_ep / max_repeat
), "The number of scene switches is unexpected."
assert all(
len(group) == max_repeat for group in grouped_episodes
), "Not all scene stitches are equal to requirement."
def test_iterator_scene_switching_steps():
total_ep = 1000
max_repeat_steps = 250
dataset = _construct_dataset(total_ep)
episode_iter = dataset.get_episode_iterator(
max_scene_repeat_steps=max_repeat_steps,
shuffle=False,
step_repetition_range=0.0,
)
episodes = sorted(dataset.episodes, key=lambda x: x.scene_id)
episode = next(episode_iter)
assert (
episode.episode_id == episodes.pop(0).episode_id
), "After max_repeat_steps episodes a scene switch doesn't happen."
# episodes before max_repeat reached should be identical
for _ in range(max_repeat_steps):
episode_iter.step_taken()
episode = next(episode_iter)
assert (
episode.episode_id != episodes.pop(0).episode_id
), "After max_repeat_steps episodes a scene switch doesn't happen."
remaining_episodes = list(islice(episode_iter, total_ep - 2))
assert len(remaining_episodes) == len(
episodes
), "Remaining episodes numbers aren't equal."
assert len({e.scene_id for e in remaining_episodes}) == len(
list(groupby(remaining_episodes, lambda ep: ep.scene_id))
), (
"Next episodes should still be grouped by scene (before next "
"switching)."
)
def test_preserve_order():
dataset = _construct_dataset(100)
episodes = sorted(dataset.episodes, reverse=True, key=lambda x: x.scene_id)
dataset.episodes = episodes[:]
episode_iter = dataset.get_episode_iterator(shuffle=False, cycle=False)
assert list(episode_iter) == episodes
| 33.629834 | 79 | 0.675867 |
397da19920522c12b705ddfdc6138bb6663582a4
| 1,008 |
py
|
Python
|
authors/apps/profiles/renderers.py
|
ronaldndirangu/medium-clone
|
0a65cf923cb340936e4e249fcf39ec2561b64ac9
|
[
"BSD-3-Clause"
] | 3 |
2021-01-03T19:54:48.000Z
|
2022-01-20T20:44:04.000Z
|
authors/apps/profiles/renderers.py
|
andela/ah-titans
|
0a65cf923cb340936e4e249fcf39ec2561b64ac9
|
[
"BSD-3-Clause"
] | 43 |
2018-07-23T12:34:09.000Z
|
2021-06-10T20:34:54.000Z
|
authors/apps/profiles/renderers.py
|
ronaldndirangu/medium-clone
|
0a65cf923cb340936e4e249fcf39ec2561b64ac9
|
[
"BSD-3-Clause"
] | 3 |
2021-02-07T11:55:06.000Z
|
2021-10-31T17:37:04.000Z
|
import json
from rest_framework.renderers import JSONRenderer
class ProfileJSONRenderer(JSONRenderer):
"""This class contains json renderer for Profile"""
charset = 'utf-8'
def render(self, data, media_type=None, renderer_context=None):
if data is not None:
if len(data) <= 1:
return json.dumps({
'profile': data
})
return json.dumps({
'profiles': data
})
class FollowersJSONRenderer(JSONRenderer):
"""This class contains json renderer for Profile"""
charset = 'utf-8'
def render(self, data, media_type=None, renderer_context=None):
return json.dumps({
'followers': data
})
class FollowingJSONRenderer(JSONRenderer):
"""This class contains json renderer for Profile"""
charset = 'utf-8'
def render(self, data, media_type=None, renderer_context=None):
return json.dumps({
'following': data
})
| 24.585366 | 67 | 0.599206 |
07a4d49263ad580baa537b828be60a8e13d150c2
| 1,227 |
py
|
Python
|
core/builder.py
|
spyrozone/keylogger
|
7aa0bbccfa059db57bbb77a566ae69bdb768def3
|
[
"BSD-3-Clause"
] | 235 |
2018-07-13T20:17:39.000Z
|
2022-03-29T05:36:24.000Z
|
core/builder.py
|
safithetechi/HeraKeylogger
|
7aa0bbccfa059db57bbb77a566ae69bdb768def3
|
[
"BSD-3-Clause"
] | 5 |
2019-02-25T15:10:48.000Z
|
2021-06-11T03:49:49.000Z
|
core/builder.py
|
safithetechi/HeraKeylogger
|
7aa0bbccfa059db57bbb77a566ae69bdb768def3
|
[
"BSD-3-Clause"
] | 90 |
2018-07-13T22:39:15.000Z
|
2021-12-15T11:45:22.000Z
|
######################################################
# #
# HeraChromeKeylogger #
# #
# by: UNDEADSEC #
# #
# Telegram Group: https://t.me/UndeadSec #
# YouTube Channel: https://youtube.com/c/UndeadSec #
# Twitter: https://twitter.com/A1S0N_ #
# #
######################################################
from json import load, dump
from os import system
def build(prefix, domain):
with open('MaliciousExtension/temis.js', 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write('var url = \'' + prefix + domain + '\';' + '\n' + content)
def editMan(newName, newDesc, newVersion):
with open('MaliciousExtension/manifest.json', 'r+') as f:
data = load(f)
data['name'] = newName
data['description'] = newDesc
data['version'] = newVersion
f.seek(0)
dump(data, f, indent=4)
f.truncate()
def runServer():
system("sudo php -S 127.0.0.1:80")
| 36.088235 | 74 | 0.393643 |
795dd2f2220171d65b48466f851ead2abe1ac163
| 929 |
py
|
Python
|
oscar/lib/python2.7/site-packages/phonenumber_field/formfields.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/phonenumber_field/formfields.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/phonenumber_field/formfields.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.phonenumber import to_python
from phonenumber_field.validators import validate_international_phonenumber
class PhoneNumberField(CharField):
default_error_messages = {
'invalid': _('Enter a valid phone number.'),
}
default_validators = [validate_international_phonenumber]
def __init__(self, *args, **kwargs):
super(PhoneNumberField, self).__init__(*args, **kwargs)
self.widget.input_type = 'tel'
def to_python(self, value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(self.error_messages['invalid'])
return phone_number
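# Illustrative usage sketch (assumes Django and django-phonenumber-field are installed
# and configured; the form class is hypothetical):
#
#   from django import forms
#
#   class ContactForm(forms.Form):
#       phone = PhoneNumberField()
#
#   # Binding a valid international number makes the form validate; an invalid one
#   # raises the 'invalid' error message defined above.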
| 33.178571 | 76 | 0.719053 |
5e3c0926e4dc855e63c9c044093af096314fc852
| 32 |
py
|
Python
|
pyhwtherm/__init__.py
|
jlim0930/pyhwtherm
|
8996d3847389aa03ae22795df2ee1ac612ae95c1
|
[
"MIT"
] | 7 |
2015-03-11T04:28:00.000Z
|
2021-09-10T19:05:24.000Z
|
pyhwtherm/__init__.py
|
jlim0930/pyhwtherm
|
8996d3847389aa03ae22795df2ee1ac612ae95c1
|
[
"MIT"
] | 1 |
2015-11-12T05:13:58.000Z
|
2015-11-12T05:13:58.000Z
|
pyhwtherm/__init__.py
|
jlim0930/pyhwtherm
|
8996d3847389aa03ae22795df2ee1ac612ae95c1
|
[
"MIT"
] | 9 |
2015-07-08T13:01:33.000Z
|
2021-10-06T06:00:05.000Z
|
from pyhwtherm import PyHWTherm
| 16 | 31 | 0.875 |
3f5f5942ce364a3a55fcb56d742874accf450811
| 3,610 |
py
|
Python
|
missioncontrol/v0/time.py
|
Psykar/missioncontrol
|
ca3a1ef523e04d87722ed18f4668c0a68bb3d519
|
[
"Apache-2.0"
] | null | null | null |
missioncontrol/v0/time.py
|
Psykar/missioncontrol
|
ca3a1ef523e04d87722ed18f4668c0a68bb3d519
|
[
"Apache-2.0"
] | 1 |
2019-03-19T06:05:21.000Z
|
2019-03-19T06:05:21.000Z
|
missioncontrol/v0/time.py
|
Psykar/missioncontrol
|
ca3a1ef523e04d87722ed18f4668c0a68bb3d519
|
[
"Apache-2.0"
] | null | null | null |
from astropy.time import Time
from datetime import datetime
from django.conf import settings
from skyfield.api import Loader
load = Loader(settings.EPHEM_DIR)
TWO_DAYS_S = 2 * 24 * 60 * 60
def timescale_functions():
""" skyfield requires a "timescale" object that is used for things like
leap seconds. We want to initialize it once, but avoid making it
a global variable.
This closure exposes helper functions that rely on a shared timescale:
now : returns a Time() of the current time
add_seconds : returns a Time() with s seconds added
utc : coerces a string, tuple, or datetime into a skyfield Time()
iso : formats a time as an ISO-8601 string
midpoint : returns the time halfway between two times
"""
timescale = load.timescale()
def now():
return timescale.now()
def add_seconds(t, s):
"""
There's no easier way to add seconds to a Time object :(
"""
return timescale.utc(*map(sum, zip(t.utc, (0, 0, 0, 0, 0, s))))
def utc(t):
""" do whatever it takes to make time into skyfield
"""
if t == "now":
return now()
if isinstance(t, str):
t = timescale.from_astropy(Time(t, format='isot'))
if isinstance(t, tuple):
t = timescale.utc(*t)
if isinstance(t, datetime):
t = timescale.utc(t)
return t
def iso(t):
t = utc(t)
return t.utc_iso(places=6)
def midpoint(start_time, end_time):
start_time = utc(start_time)
end_time = utc(end_time)
mid_time = timescale.tai_jd(
((start_time.tai + end_time.tai) / 2)
)
return mid_time
return add_seconds, now, utc, iso, midpoint
add_seconds, now, utc, iso, midpoint = timescale_functions()
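# Illustrative sketch of how these helpers compose (assumes the skyfield ephemeris data
# under settings.EPHEM_DIR is available; the timestamps are placeholders):
#
#   t0 = utc("2021-01-01T00:00:00")   # ISO string -> skyfield Time
#   t1 = add_seconds(t0, 3600)        # one hour later
#   print(iso(midpoint(t0, t1)))      # ISO-8601 midpoint at microsecond precision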
def make_timeseries(start, end, step):
""" return a list of times from start to end.
each step is 'step' seconds after the previous time.
"""
if end.tt < start.tt:
raise RuntimeError("end cannot be before start")
t = start
ts = [t]
while t.tt <= end.tt:
t = add_seconds(t, step)
ts += [t]
return ts
def get_default_range(range_start=None, range_end=None):
""" cast to internal time, set default range_start and range_end times
"""
if range_start is None:
range_start = now()
else:
range_start = utc(range_start)
if range_end is None:
range_end = add_seconds(range_start, TWO_DAYS_S)
else:
range_end = utc(range_end)
return range_start, range_end
def filter_range(windows, range_start, range_end, range_inclusive):
""" given a list of time windows (object that have start and end times),
filters out items base on the range_inclusive criteria:
start - the start of the range is inclusive
end - the end of the range is inclusive
neither - all windows must fit completely within range
both (default) - windows that overlap with range are returned
this is useful for pagination, when you may want to set either end to
inclusive depending on the direction of the page so as to not get
duplicate items.
"""
# filter the start of the range
if range_inclusive in ['end', 'neither']:
windows = filter(lambda w: utc(w.start_time).tt >= range_start.tt, windows)
else:
windows = filter(lambda w: utc(w.end_time).tt >= range_start.tt, windows)
# filter the end of the range
if range_inclusive in ['start', 'neither']:
windows = filter(lambda w: utc(w.end_time).tt <= range_end.tt, windows)
else:
windows = filter(lambda w: utc(w.start_time).tt <= range_end.tt, windows)
return windows
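# Example of the range_inclusive semantics (hypothetical window 10:55-11:05 against a
# range of [10:00, 11:00]): the window overlaps the range, so it is kept with
# range_inclusive='both' or 'end', but dropped with 'start' or 'neither', which require
# the window to end (or fit entirely) inside the range.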
| 31.12069 | 83 | 0.625208 |
53a21ef11e468f4237d472511e30f3da466db47f
| 1,714 |
py
|
Python
|
test/base/test_resume_run.py
|
Gabriel-p/pyABC
|
a1c963203c9f9e3fa40793ccf214753fb689d27f
|
[
"BSD-3-Clause"
] | null | null | null |
test/base/test_resume_run.py
|
Gabriel-p/pyABC
|
a1c963203c9f9e3fa40793ccf214753fb689d27f
|
[
"BSD-3-Clause"
] | null | null | null |
test/base/test_resume_run.py
|
Gabriel-p/pyABC
|
a1c963203c9f9e3fa40793ccf214753fb689d27f
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
from pyabc import (
ABCSMC,
RV,
Distribution,
MulticoreEvalParallelSampler,
RedisEvalParallelSamplerServerStarter,
)
@pytest.fixture(params=[0, None])
def gt_model(request):
return request.param
def RedisEvalParallelSamplerLookAheadDelayWrapper(**kwargs):
return RedisEvalParallelSamplerServerStarter(
look_ahead=True, look_ahead_delay_evaluation=True, **kwargs
)
@pytest.fixture(
params=[
MulticoreEvalParallelSampler,
RedisEvalParallelSamplerLookAheadDelayWrapper,
]
)
def sampler(request):
s = request.param()
try:
yield s
finally:
# release all resources
try:
s.shutdown()
except AttributeError:
pass
def test_resume(db_path, gt_model, sampler):
def model(parameter):
return {"data": parameter["mean"] + np.random.randn()}
prior = Distribution(mean=RV("uniform", 0, 5))
def distance(x, y):
x_data = x["data"]
y_data = y["data"]
return abs(x_data - y_data)
abc = ABCSMC(model, prior, distance, population_size=10, sampler=sampler)
history = abc.new(db_path, {"data": 2.5}, gt_model=gt_model)
run_id = history.id
print("Run ID:", run_id)
hist_new = abc.run(minimum_epsilon=0, max_nr_populations=1)
assert hist_new.n_populations == 1
abc_continued = ABCSMC(model, prior, distance, sampler=sampler)
run_id_continued = abc_continued.load(db_path, run_id)
print("Run ID continued:", run_id_continued)
hist_contd = abc_continued.run(minimum_epsilon=0, max_nr_populations=1)
assert hist_contd.n_populations == 2
assert hist_new.n_populations == 2
| 25.58209 | 77 | 0.679113 |
8fe97ff536ac2aed46c59c2b040ac53b442be9c8
| 2,928 |
py
|
Python
|
shippo/test/test_address.py
|
penguinstampede/shippo-python-client
|
d21366d959d22ff301947072346479c287bf0f51
|
[
"MIT"
] | 101 |
2015-10-10T18:44:36.000Z
|
2022-01-26T03:54:27.000Z
|
shippo/test/test_address.py
|
penguinstampede/shippo-python-client
|
d21366d959d22ff301947072346479c287bf0f51
|
[
"MIT"
] | 47 |
2015-08-07T21:13:50.000Z
|
2022-03-08T18:48:16.000Z
|
shippo/test/test_address.py
|
penguinstampede/shippo-python-client
|
d21366d959d22ff301947072346479c287bf0f51
|
[
"MIT"
] | 71 |
2015-10-31T01:54:09.000Z
|
2022-02-17T22:43:30.000Z
|
# -*- coding: utf-8 -*-
import unittest2
from mock import patch
import shippo
from shippo.test.helper import (
DUMMY_ADDRESS,
INVALID_ADDRESS,
NOT_POSSIBLE_ADDRESS,
ShippoTestCase,
)
from shippo.test.helper import shippo_vcr
class AddressTests(ShippoTestCase):
request_client = shippo.http_client.RequestsClient
def setUp(self):
super(AddressTests, self).setUp()
def get_http_client(*args, **kwargs):
return self.request_client(*args, **kwargs)
self.client_patcher = patch(
'shippo.http_client.new_default_http_client')
client_mock = self.client_patcher.start()
client_mock.side_effect = get_http_client
def tearDown(self):
super(AddressTests, self).tearDown()
self.client_patcher.stop()
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_invalid_create(self):
address = shippo.Address.create(**INVALID_ADDRESS)
self.assertEqual(address.is_complete, False)
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_create(self):
address = shippo.Address.create(**DUMMY_ADDRESS)
self.assertEqual(address.is_complete, True)
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_retrieve(self):
address = shippo.Address.create(**DUMMY_ADDRESS)
retrieve = shippo.Address.retrieve(address.object_id)
self.assertItemsEqual(address, retrieve)
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_invalid_retrieve(self):
self.assertRaises(shippo.error.APIError, shippo.Address.retrieve,
'EXAMPLE_OF_INVALID_ID')
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_list_all(self):
address_list = shippo.Address.all()
self.assertTrue('results' in address_list)
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_list_page_size(self):
pagesize = 1
address_list = shippo.Address.all(size=pagesize)
self.assertEqual(len(address_list.results), pagesize)
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_invalid_validate(self):
address = shippo.Address.create(**NOT_POSSIBLE_ADDRESS)
self.assertEqual(address.is_complete, True)
address = shippo.Address.validate(address.object_id)
self.assertEqual(address.is_complete, False)
@shippo_vcr.use_cassette(cassette_library_dir='shippo/test/fixtures/address')
def test_validate(self):
address = shippo.Address.create(**DUMMY_ADDRESS)
self.assertEqual(address.is_complete, True)
address = shippo.Address.validate(address.object_id)
if __name__ == '__main__':
unittest2.main()
| 34.447059 | 81 | 0.717896 |
d2c4e929b183b82e88ba9a42870222ee184e0256
| 542 |
py
|
Python
|
blender/arm/logicnode/physics/LN_set_rb_friction.py
|
notwarp/armory
|
bd6078e3035eefcb3c725664698eeb369b4c2d88
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/physics/LN_set_rb_friction.py
|
notwarp/armory
|
bd6078e3035eefcb3c725664698eeb369b4c2d88
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/physics/LN_set_rb_friction.py
|
notwarp/armory
|
bd6078e3035eefcb3c725664698eeb369b4c2d88
|
[
"Zlib"
] | null | null | null |
from arm.logicnode.arm_nodes import *
class SetFrictionNode (ArmLogicTreeNode):
"""Sets the friction of the given rigid body."""
bl_idname = 'LNSetFrictionNode'
bl_label = 'Set RB Friction'
bl_icon = 'NONE'
arm_version = 1
def init(self, context):
super(SetFrictionNode, self).init(context)
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('ArmNodeSocketObject', 'RB')
self.inputs.new('NodeSocketFloat', 'Friction')
self.outputs.new('ArmNodeSocketAction', 'Out')
| 31.882353 | 54 | 0.671587 |
de30525f213036d4da51a3ab29938491e6bf5d71
| 15,649 |
py
|
Python
|
dfc/networks/dfc_layer.py
|
mariacer/strong_dfc
|
04d0c633106d39637867062c039583a022af374b
|
[
"Apache-2.0"
] | null | null | null |
dfc/networks/dfc_layer.py
|
mariacer/strong_dfc
|
04d0c633106d39637867062c039583a022af374b
|
[
"Apache-2.0"
] | null | null | null |
dfc/networks/dfc_layer.py
|
mariacer/strong_dfc
|
04d0c633106d39637867062c039583a022af374b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Alexander Meulemans, Matilde Tristany, Maria Cervera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :networks/dfc_layer.py
# @author :mc
# @contact :[email protected]
# @created :28/11/2021
# @version :1.0
# @python_version :3.7.4
"""
Implementation of a layer for Deep Feedback Control
---------------------------------------------------
A layer that is prepared to be trained with DFC.
"""
import numpy as np
import torch
import torch.nn as nn
from networks.layer_interface import LayerInterface
class DFCLayer(LayerInterface):
"""Implementation of a Deep Feedback Control layer.
It contains the following important functions:
* forward: which computes the linear activation based on the previous layer
as well as the post non-linearity activation. It stores these in the
attributes "_linear_activations" and "_activa.tions".
* compute_forward_gradients: computes the forward parameter updates and
stores them under "grad", by using the pre-synaptic activations and the
controller feedback. The ule is based on a voltage difference rule.
* compute_forward_gradients_continuous: same as "compute_forward_gradients"
but it performs an integration over time.
* compute_feedback_gradients: compute the feedback gradients.
* compute_feedback_gradients_continuous: same as
"compute_feedback_gradients" but it performs an integration over time.
Args:
(....): See docstring of class :class:`layer_interface.LayerInterface`.
last_layer_features (int): The size of the output layer.
"""
def __init__(self, in_features, out_features, last_layer_features,
bias=True, requires_grad=False, forward_activation='tanh',
initialization='orthogonal',
initialization_fb='weight_product'):
super().__init__(in_features, out_features, bias=bias,
requires_grad=requires_grad,
forward_activation=forward_activation,
initialization=initialization)
if initialization_fb is None:
initialization_fb = initialization
self._initialization_fb = initialization_fb
self._last_features = last_layer_features
self._activations = None
self._linear_activations = None
# Create and initialize feedback weights.
self.set_direct_feedback_layer(last_layer_features, out_features)
# The "weight_product" initialization is applied at the network level,
# since it requires knowledge of all weight matrices. So here, we
# initialize them equal to the feedforward weights and then it will get
# overwritten.
if initialization_fb=='weight_product':
initialization_fb = initialization
self.init_layer(self._weights_backward,
initialization=initialization_fb)
@property
def weights_backward(self):
"""Getter for read-only attribute :attr:`_weights_backward`."""
return self._weights_backward
@weights_backward.setter
def weights_backward(self, tensor):
"""Setter for feedback weights.
Args:
tensor (torch.Tensor): The tensor of values to set.
"""
self._weights_backward = tensor
@property
def activations(self):
"""Getter for read-only attribute :attr:`activations` """
return self._activations
@activations.setter
def activations(self, value):
""" Setter for the attribute activations"""
self._activations = value
@property
def linear_activations(self):
"""Getter for read-only attribute :attr:`linear_activations` """
return self._linear_activations
@linear_activations.setter
def linear_activations(self, value):
"""Setter for the attribute :attr:`linear_activations` """
self._linear_activations = value
def set_direct_feedback_layer(self, last_features, out_features):
"""Create the network backward parameters.
This layer connects the output layer to a hidden layer. No biases are
used in direct feedback layers. These backward parameters have no
gradient as they are fixed.
Note that as opposed to DFA, here the backwards weights are not
Parameters.
Args:
(....): See docstring of method
:meth:`layer_interface.LayerInterface.set_layer`.
"""
self._weights_backward = torch.empty((out_features, last_features))
def forward(self, x):
"""Compute the output of the layer.
This method applies first a linear mapping with the parameters
``weights`` and ``bias``, after which it applies the forward activation
function.
In the forward pass there is no noise, and thus the normal activations
and the low-pass filtered activations are identical.
Args:
x (torch.Tensor): Mini-batch of size `[B, in_features]` with input
activations from the previous layer or input.
Returns:
The mini-batch of output activations of the layer.
"""
a = x.mm(self.weights.t())
if self.bias is not None:
a += self.bias.unsqueeze(0).expand_as(a)
self.linear_activations = a
self.linear_activations_lp = a
self.activations = self.forward_activation_function(a)
self.activations_lp = self.forward_activation_function(a)
return self.activations
def compute_forward_gradients(self, delta_v, r_previous, scale=1.,
saving_ndi_updates=False,
learning_rule='nonlinear_difference'):
"""Computes forward gradients using a local-in-time learning rule.
This function applies a non-linear difference learning rule as described
in Eq. (5) in the paper. Specifically, it computes the difference between
the non-linear transformation of basal and somatic voltages.
Depending on the option ``saving_ndi_updates`` these updates will be
stored in different locations (see argument docstring).
Args:
delta_v: The feedback teaching signal from the controller.
r_previous (torch.Tensor): The activations of the previous layer.
scale (float): Scaling factor for the gradients.
saving_ndi_updates (boolean): Whether to save the non-dynamical
inversion updates. When ``True``, computed updates are added to
``ndi_updates_weights`` (and bias) to later compare with the
steady-state/continuous updates. When ``False``, computed
updates are added to ``weights.grad`` (and bias), to be later
updated.
learning_rule (str): The type of learning rule.
"""
batch_size = r_previous.shape[0]
if learning_rule == "voltage_difference":
teaching_signal = 2 * (-delta_v)
elif learning_rule == "nonlinear_difference":
# Compute feedforward activations in basal and somatic compartments.
v_ff = torch.matmul(r_previous, self.weights.t())
if self.bias is not None:
v_ff += self.bias.unsqueeze(0).expand_as(v_ff)
v = delta_v + v_ff
# Compute the teaching signal based on the basal-somatic difference.
teaching_signal = self.forward_activation_function(v) - \
self.forward_activation_function(v_ff)
else:
raise ValueError('The rule %s is not valid.' % learning_rule)
# Compute the gradients and actual updates.
weights_grad = - 2 * 1./batch_size * teaching_signal.t().mm(r_previous)
weight_update = scale * weights_grad.detach()
if self.bias is not None:
bias_grad = - 2 * teaching_signal.mean(0)
bias_update = scale * bias_grad.detach()
# Store the updates appropriately.
if saving_ndi_updates:
self.ndi_updates_weights = weight_update
if self.bias is not None:
self.ndi_updates_bias = bias_update
else:
self._weights.grad += weight_update
if self.bias is not None:
self._bias.grad += bias_update
def compute_forward_gradients_continuous(self, v_time, v_ff_time,
r_previous_time, t_start=None, t_end=None,
learning_rule='nonlinear_difference'):
r"""Computes forward gradients using an integration (sum) of voltage
differences across compartments.
This weight update is identical to ``compute_forward_gradients``
except that it allows integration over more than one timestep.
However, here the somatic and basal voltages are assumed to have been
computed outside and provided as an input argument.
Args:
v_time: The somatic voltages at different timesteps.
v_ff_time: The basal voltages at different timesteps.
r_previous_time: The activations of the previous layer at different
timesteps.
t_start (int): The initial time index for the integration.
t_end (int): The final time index for the integration.
learning_rule (str): The type of learning rule.
"""
batch_size = r_previous_time.shape[1]
# Get the boundaries across which to compute the summation.
if t_start is None:
t_start = 0
if t_end is None:
t_end = v_time.shape[0]
T = t_end - t_start
if learning_rule == "voltage_difference":
# Compute the teaching signal based on the voltage difference.
teaching_signal = v_time[t_start:t_end] - v_ff_time[t_start:t_end]
elif learning_rule == "nonlinear_difference":
# Compute the teaching signal based on the basal-somatic difference.
teaching_signal = \
self.forward_activation_function(v_time[t_start:t_end]) - \
self.forward_activation_function(v_ff_time[t_start:t_end])
else:
raise ValueError('The rule %s is not valid.' % learning_rule)
# Compute the gradients.
if self.bias is not None:
bias_grad = -2 * 1. / T * torch.sum(teaching_signal, axis=0).mean(0)
teaching_signal = teaching_signal.permute(0, 2, 1)
weights_grad = -2 * 1. / batch_size * 1. / T * \
torch.sum(teaching_signal @ \
r_previous_time[t_start:t_end, :, :], axis=0)
# Store the updates appropriately.
if self.bias is not None:
self._bias.grad = bias_grad.detach()
self._weights.grad = weights_grad.detach()
def compute_feedback_gradients_continuous(self, v_fb_time, u_time,
t_start=None, t_end=None,
sigma=1., beta=0., scaling=1.):
r"""Computes feedback gradients using an integration (sum) of voltage.
This weight update is identical to :meth:`compute_feedback_gradients`
except that it allows to integrate over more than one timestep.
It follows the differential equation:
.. math::
\frac{dQ_i}{dt} = -\mathbf{v}_i^\text{fb} \mathbf{u}(t)^T - \
\beta Q_i
Refer to :meth:`compute_feedback_gradients` for variable details.
Note that pytorch saves the positive gradient, hence we should save
:math:`-\Delta Q_i`.
Args:
v_fb_time (torch.Tensor): The apical compartment voltages over
a certain time period.
u_time (torch.Tensor): The control inputs over certain time period.
t_start (torch.Tensor): The start index from which the summation
over time should start.
t_end (torch.Tensor): The stop index at which the summation over
time should stop.
sigma (float): The standard deviation of the noise in the network
dynamics. This is used to scale the fb weight update, such that
its magnitude is independent of the noise variance.
beta (float): The homeostatic weight decay parameter.
scaling (float): In the theory for the feedback weight updates, the
update for each layer should be scaled with
:math:`(1+\tau_{v}/\tau_{\epsilon})^{L-i}`, with L the amount of
layers and i the layer index. ``scaling`` should be the factor
:math:`(1+\tau_{v}/\tau_{\epsilon})^{L-i}` for this layer.
"""
batch_size = v_fb_time.shape[1]
# Get the boundaries across which to compute the summation.
if t_start is None:
t_start = 0
if t_end is None:
t_end = v_fb_time.shape[0]
T = t_end - t_start
# Compute the gradient scaling.
if sigma < 0.01:
scale = 1 / 0.01 ** 2
else:
scale = 1 / sigma ** 2
scale *= scaling
# Compute the update.
feedbackweights_grad = scale/(T * batch_size) * \
torch.sum(v_fb_time[t_start:t_end].permute(0,2,1) \
@ u_time[t_start:t_end], axis=0)
feedbackweights_grad += beta * self._weights_backward
self._weights_backward.grad = feedbackweights_grad.detach()
def save_feedback_batch_logs(self, writer, step, name, no_gradient=False,
pretraining=False):
"""Save feedback weight stats for the latest mini-batch.
Args:
writer (SummaryWriter): Summary writer from tensorboardX.
step (int): The global step used for the x-axis of the plots.
name (str): The name of the layer.
no_gradient (bool): Flag indicating whether we should skip saving
the gradients of the feedback weights.
pretraining (bool): Flag indicating that the training is in the
initialization phase (only training the feedback weights).
"""
if pretraining:
prefix = 'feedback_training/{}/'.format(name)
else:
prefix = name + '/'
feedback_weights_norm = torch.norm(self.weights_backward)
writer.add_scalar(tag=prefix + 'feedback_weights_norm',
scalar_value=feedback_weights_norm,
global_step=step)
if self.weights_backward.grad is not None:
feedback_weights_grad_norm = torch.norm(self.weights_backward.grad)
writer.add_scalar(tag=prefix + 'feedback_weights_gradient_norm',
scalar_value=feedback_weights_grad_norm,
global_step=step)
@property
def name(self):
return 'DFCLayer'
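# Illustrative standalone sketch of the "nonlinear_difference" update computed in
# compute_forward_gradients (tanh as a stand-in activation; the shapes, the feedback
# signal, and the omitted bias/scaling are assumptions):
#
#   import torch
#   r_prev = torch.randn(32, 20)             # presynaptic rates, batch of 32
#   W = torch.randn(10, 20)                  # feedforward weights
#   delta_v = 0.01 * torch.randn(32, 10)     # controller feedback
#   v_ff = r_prev @ W.t()                    # basal (feedforward) voltages
#   teaching = torch.tanh(delta_v + v_ff) - torch.tanh(v_ff)
#   dW = -2.0 / r_prev.shape[0] * teaching.t() @ r_prev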
| 42.873973 | 80 | 0.629305 |