| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | content_cleaned | language | language_score | comments | edu_score | edu_int_score |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 4-286 | stringlengths 5-119 | int64 0-191k | stringlengths 1-7 | stringlengths 6-1.03M | stringlengths 6-1.03M | stringclasses 111 values | float64 0.03-1 | stringlengths 0-556k | float64 0.32-5.03 | int64 0-5 |
DPATrail.py | angzhang1/algorithm_thinking_graph_analysis | 0 | 6632051 |
"""
Provided code for application portion of module 1
Helper class for implementing efficient version
of DPA algorithm
"""
# general imports
import random
class DPATrial:
"""
Simple class to encapsulate optimized trials for DPA algorithm
Maintains a list of node numbers with multiple instances of each number.
    The number of instances of each node number is
    in the same proportion as the desired probabilities.
Uses random.choice() to select a node number from this list for each trial.
"""
def __init__(self, num_nodes):
"""
Initialize a DPATrial object corresponding to a
complete graph with num_nodes nodes
Note the initial list of node numbers has num_nodes copies of
each node number
"""
self._num_nodes = num_nodes
self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]
def run_trial(self, num_nodes):
"""
        Conduct num_nodes trials by applying random.choice()
to the list of node numbers
Updates the list of node numbers so that the number of instances of
each node number is in the same ratio as the desired probabilities
Returns:
Set of nodes
"""
# compute the neighbors for the newly-created node
new_node_neighbors = set()
for dummy_idx in range(num_nodes):
new_node_neighbors.add(random.choice(self._node_numbers))
# update the list of node numbers so that each node number
# appears in the correct ratio
self._node_numbers.append(self._num_nodes)
self._node_numbers.extend(list(new_node_neighbors))
# update the number of nodes
self._num_nodes += 1
return new_node_neighbors
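# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how this helper is typically driven: start from a
# complete graph on num_existing nodes, then add nodes one at a time,
# connecting each new node to the neighbor set returned by run_trial().
# The function name and the dict-of-sets graph representation are assumptions
# made for this example only.
def example_dpa_graph(total_nodes, num_existing):
    """Grow a DPA graph with total_nodes nodes, wiring each new node to
    num_existing preferentially chosen neighbors (illustrative only)."""
    # complete directed graph on the first num_existing nodes
    graph = {node: set(range(num_existing)) - {node} for node in range(num_existing)}
    trial = DPATrial(num_existing)
    for new_node in range(num_existing, total_nodes):
        graph[new_node] = trial.run_trial(num_existing)
    return graph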
| """
Provided code for application portion of module 1
Helper class for implementing efficient version
of DPA algorithm
"""
# general imports
import random
class DPATrial:
"""
Simple class to encapsulate optimized trials for DPA algorithm
Maintains a list of node numbers with multiple instances of each number.
The number of instances of each node number are
in the same proportion as the desired probabilities
Uses random.choice() to select a node number from this list for each trial.
"""
def __init__(self, num_nodes):
"""
Initialize a DPATrial object corresponding to a
complete graph with num_nodes nodes
Note the initial list of node numbers has num_nodes copies of
each node number
"""
self._num_nodes = num_nodes
self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]
def run_trial(self, num_nodes):
"""
Conduct num_node trials using by applying random.choice()
to the list of node numbers
Updates the list of node numbers so that the number of instances of
each node number is in the same ratio as the desired probabilities
Returns:
Set of nodes
"""
# compute the neighbors for the newly-created node
new_node_neighbors = set()
for dummy_idx in range(num_nodes):
new_node_neighbors.add(random.choice(self._node_numbers))
# update the list of node numbers so that each node number
# appears in the correct ratio
self._node_numbers.append(self._num_nodes)
self._node_numbers.extend(list(new_node_neighbors))
# update the number of nodes
self._num_nodes += 1
return new_node_neighbors | en | 0.834432 | Provided code for application portion of module 1 Helper class for implementing efficient version of DPA algorithm # general imports Simple class to encapsulate optimized trials for DPA algorithm Maintains a list of node numbers with multiple instances of each number. The number of instances of each node number are in the same proportion as the desired probabilities Uses random.choice() to select a node number from this list for each trial. Initialize a DPATrial object corresponding to a complete graph with num_nodes nodes Note the initial list of node numbers has num_nodes copies of each node number Conduct num_node trials using by applying random.choice() to the list of node numbers Updates the list of node numbers so that the number of instances of each node number is in the same ratio as the desired probabilities Returns: Set of nodes # compute the neighbors for the newly-created node # update the list of node numbers so that each node number # appears in the correct ratio # update the number of nodes | 3.417035 | 3 |
iris/worker/__main__.py | dioptra-io/iris | 6 | 6632052 |
import sys
from dramatiq.cli import main
if __name__ == "__main__":
# Equivalent to `python -m dramatiq iris.worker.watch`.
sys.argv.append("iris.worker.watch")
sys.exit(main())
freesif/__init__.py | alessioprestileo/freesif | 6 | 6632053 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME>
"""
"""
from .sequentialparser.sif2hdf5 import sif2hdf5
from .data.file import File, open_hdf5, open_sif
from . import utils
from . import calc
__version__ = '0.1.2'
__all__ = ['sif2hdf5', 'File', 'utils', 'open_hdf5', 'open_sif', 'calc']
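# --- Illustrative usage (not part of the original file) ---
# The package re-exports its public API at the top level, so client code can
# use the names listed in __all__ directly, roughly along the lines of:
#
#     import freesif
#     freesif.sif2hdf5('model.SIF')         # convert a sequential SIF file to HDF5
#     data = freesif.open_hdf5('model.h5')  # then open the converted file
#
# The file names above are placeholders; the exact call signatures are defined
# in freesif.sequentialparser.sif2hdf5 and freesif.data.file, not shown here.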
graphs_and_plots/plt_basic.py | mayankvanani/matplotlib_graphs_templates | 0 | 6632054 | import matplotlib.pyplot as plt
## plotting x vs y where x,y = [lists]
## plt.plot([x], [y], label)
## label is key of graph
x1 = [1,2,3,4]
y1 = [1,8,2,4]
x2 = [1,2,3,8]
y2 = [10,16,13,18]
plt.plot(x2,y2, label='plot_1')
plt.plot(x1, y1, label='plot_2')
## labelling x axis and y axis
plt.xlabel('random nos x- axis')
plt.ylabel('random nos y-axis')
## title of graph
## \n inserts a line break so the title spans two lines
plt.title('rand_x V/S rand_y\ncheck it out!')
## this bring the labels of the plot to frontend as key of graph
plt.legend()
## plt.show() renders the figure that was built up above
plt.show()
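## --- Illustrative addition (not part of the original script) ---
## To also write the figure to disk, call plt.savefig() before plt.show(),
## e.g. plt.savefig('rand_plot.png', dpi=150); the file name is a placeholder.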
examples/makefig8.py | prudhvibhattiprolu/Zstats | 2 | 6632055 | #This program prints out data to *.dat files, for Fig. 8 in our paper
import numpy as np
from Zstats import Zdisc
#WARNING: Each computation, particularly when *asimov_only* is set to False, takes a lot more time when background uncertainty is non-zero
#And, the computation time increases as s/bhat (*sbyb*) gets smaller, and also when *s* gets larger. Not recommended to run this script on a single cpu if generating data when background uncertainty is non-zero.
#For faster computation, significantly reduce the number of points in *s_array* and/or run multiple batch jobs on a compute cluster.
#X-axis
s_array = np.append(np.arange(0.10,2.00,0.01),np.arange(2.00,10.02,0.02))
s_array = np.append(s_array,np.arange(10.05,100.05,0.05))
#Fig 8: Known background case [left panel]
#Set fractional uncertainty in the background
dbbyb=0
#Y-axis
#Calculate P(Zdisc > 5.0) for discovery case
#*Zcriteria* is set to 5.0 by default for function *Zdisc*
#s/bhat=2
temp1 = [Zdisc(s,s/2,dbbyb*s/2,asimov_only=False)[5] for s in s_array]
#s/bhat=5
temp2 = [Zdisc(s,s/5,dbbyb*s/5,asimov_only=False)[5] for s in s_array]
#s/bhat=10
temp3 = [Zdisc(s,s/10,dbbyb*s/10,asimov_only=False)[5] for s in s_array]
#s/bhat=50
temp4 = [Zdisc(s,s/50,dbbyb*s/50,asimov_only=False)[5] for s in s_array]
#Printing data to a *.dat file
np.savetxt('fig8_disc_dbbyb%s.dat' %(dbbyb),np.transpose([s_array, temp1, temp2, temp3, temp4]),delimiter='\t',header='s \t s/bhat=2 \t s/bhat=5 \t s/bhat=10 \t s/bhat=50',comments='#Fig8: The probability of obtaining a significance Zdisc > 5 in a large number of pseudo-experiments generated for the discovery case, when dbhat/bhat=%s\n#' %(dbbyb))
#Fig 8: Uncertain background case, with dbhat/bhat=0.5 [right panel]
#Set fractional uncertainty in the background
dbbyb=0.5
#Y-axis
#Calculate P(Zdisc > 5.0) for discovery case
#s/bhat=2
temp1 = [Zdisc(s,s/2,dbbyb*s/2,asimov_only=False)[5] for s in s_array]
#s/bhat=5
temp2 = [Zdisc(s,s/5,dbbyb*s/5,asimov_only=False)[5] for s in s_array]
#s/bhat=10
temp3 = [Zdisc(s,s/10,dbbyb*s/10,asimov_only=False)[5] for s in s_array]
#s/bhat=50
temp4 = [Zdisc(s,s/50,dbbyb*s/50,asimov_only=False)[5] for s in s_array]
#Printing data to a *.dat file
np.savetxt('fig8_disc_dbbyb%s.dat' %(dbbyb),np.transpose([s_array, temp1, temp2, temp3, temp4]),delimiter='\t',header='s \t s/bhat=2 \t s/bhat=5 \t s/bhat=10 \t s/bhat=50',comments='#Fig8: The probability of obtaining a significance Zdisc > 5 in a large number of pseudo-experiments generated for the discovery case, when dbhat/bhat=%s\n#' %(dbbyb))
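#--- Illustrative note (not part of the original script) ---
#The list comprehensions above are embarrassingly parallel: every Zdisc call is
#independent. On a multi-core machine they can be replaced by a process pool,
#e.g. (sketch, reusing the same Zdisc arguments as above; the helper name
#prob_disc is an assumption for the example only):
#
#    from multiprocessing import Pool
#    from functools import partial
#
#    def prob_disc(s, sbyb, dbbyb):
#        return Zdisc(s, s/sbyb, dbbyb*s/sbyb, asimov_only=False)[5]
#
#    with Pool() as pool:
#        temp1 = pool.map(partial(prob_disc, sbyb=2, dbbyb=dbbyb), s_array)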
var/spack/repos/builtin/packages/uncrustify/package.py | QianJianhua1/spack | 11 | 6632056 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Uncrustify(Package):
"""Source Code Beautifier for C, C++, C#, ObjectiveC, Java, and others."""
homepage = "http://uncrustify.sourceforge.net/"
git = "https://github.com/uncrustify/uncrustify"
url = "https://sourceforge.net/projects/uncrustify/files/uncrustify/uncrustify-0.69/uncrustify-0.69.tar.gz"
maintainers = ['gmaurel']
version('master', branch='master')
version('0.74', commit='62048b')
version('0.73', commit='25b765')
version('0.72', commit='1d3d8f')
version('0.71', commit='64d82f')
version('0.70', commit='51f64d')
version('0.69', commit='a7a8fb')
version('0.68', commit='86bc34')
version('0.67', commit='00321a')
version('0.66', commit='80f549')
version('0.65', commit='905676')
version('0.64', commit='1d7d97')
version('0.63', commit='44ce0f')
version('0.62', commit='5987f2')
version('0.61', sha256='1df0e5a2716e256f0a4993db12f23d10195b3030326fdf2e07f8e6421e172df9')
depends_on('cmake', type='build', when='@0.64:')
depends_on('automake', type='build', when='@0.63')
depends_on('autoconf', type='build', when='@0.63')
@when('@0.64:')
def install(self, spec, prefix):
with working_dir('spack-build', create=True):
cmake('..', *std_cmake_args)
make()
make('install')
@when('@0.63')
def install(self, spec, prefix):
which('bash')('autogen.sh')
configure('--prefix={0}'.format(self.prefix))
make()
make('install')
@when('@:0.62')
def install(self, spec, prefix):
configure('--prefix={0}'.format(self.prefix))
make()
make('install')
patch('uncrustify-includes.patch', when='@0.73')
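    # --- Illustrative usage (not part of the original recipe) ---
    # Once this package file is on a Spack repository path, a particular
    # version is installed from the command line with the usual spec syntax,
    # e.g.:
    #
    #     spack install uncrustify@0.73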
coronavirus_cli/main.py | MickaelLopes/Coronavirus_Update_CLI | 0 | 6632057 |
# !/usr/bin/env python3.7
from interface.command import RequestCommand
from cleo import Application
# from datarequest.global_data import GlobalData
# from datarequest.utils import _find_country_row
app = Application()
app.add(RequestCommand())
if __name__ == '__main__':
app.run()
# g = GlobalData()
# print(_find_country_row(g.soup, 'USA'))
do_scripts/bot.py | schedutron/chirps | 40 | 6632058 |
"""Main bot script - bot.py
For the DigitalOcean Tutorial.
"""
# Parts of the standard library
import random
import time
# Installed library
from lxml.html import fromstring
import nltk # Used here to split paragraphs into sentences during scraping.
nltk.download('punkt')
import requests # Used to get the HTML source of a web page to be scraped.
from twitter import OAuth, Twitter
# Local credentials file defined above
import credentials
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') # This line will be explained soon
oauth = OAuth(
credentials.ACCESS_TOKEN,
credentials.ACCESS_SECRET,
credentials.CONSUMER_KEY,
credentials.CONSUMER_SECRET
)
t = Twitter(auth=oauth)
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)'
' AppleWebKit/537.36 (KHTML, like Gecko) Cafari/537.36'
}
def extract_paratext(paras):
"""Extracts text from <p> elements and returns a clean, tokenized random
paragraph."""
paras = [para.text_content() for para in paras if para.text_content()]
para = random.choice(paras)
return tokenizer.tokenize(para)
def extract_text(para):
"""Returns a sufficiently-large random text from a tokenized paragraph,
if such text exists. Otherwise, returns None."""
for _ in range(10):
text = random.choice(para)
if text and 60 < len(text) < 210:
return text
return None
def scrape_coursera():
"""Scrapes content from the Coursera blog."""
url = 'https://blog.coursera.org'
r = requests.get(url, headers=HEADERS)
tree = fromstring(r.content)
links = tree.xpath('//div[@class="recent"]//div[@class="title"]/a/@href')
for link in links:
r = requests.get(link, headers=HEADERS)
blog_tree = fromstring(r.content)
paras = blog_tree.xpath('//div[@class="entry-content"]/p')
para = extract_paratext(paras) # Gets a random paragraph.
text = extract_text(para) # Gets a good-enough random text quote.
if not text:
continue
yield '"%s" %s' % (text, link) # This will be passed on to the code that composes our tweet.
def scrape_thenewstack():
"""Scrapes news from thenewstack.io"""
# For some, page may not be fetched without verify=False flag.
r = requests.get('https://thenewstack.io', verify=False)
tree = fromstring(r.content)
links = tree.xpath('//div[@class="normalstory-box"]/header/h2/a/@href')
for link in links:
r = requests.get(link, verify=False)
tree = fromstring(r.content)
paras = tree.xpath('//div[@class="post-content"]/p')
para = extract_paratext(paras)
text = extract_text(para) # Gets a good-enough random text quote.
if not text:
continue
yield '"%s" %s' % (text, link)
def main():
"""Encompasses the main loop of the bot."""
print('Bot started.')
news_funcs = ['scrape_coursera', 'scrape_thenewstack']
news_iterators = [] # A list for the scrapers we defined.
for func in news_funcs:
news_iterators.append(globals()[func]())
while True:
for i, iterator in enumerate(news_iterators):
try:
tweet = next(iterator)
t.statuses.update(status=tweet)
print(tweet, end='\n')
time.sleep(600) # Sleep for 10 minutes.
except StopIteration:
                news_iterators[i] = globals()[news_funcs[i]]()
if __name__ == "__main__": # This checks if the script is called directly.
main()
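# --- Illustrative extension (not part of the original script) ---
# New sources can be plugged in by defining another generator next to
# scrape_coursera()/scrape_thenewstack() that follows the same contract --
# yield strings shaped like '"<quote>" <link>' -- and adding its name to
# news_funcs in main(). A sketch (the site URL and XPath expressions are
# placeholders, not a real feed):
#
#     def scrape_example_site():
#         r = requests.get('https://example.com/blog', headers=HEADERS)
#         tree = fromstring(r.content)
#         for link in tree.xpath('//article//a/@href'):
#             r = requests.get(link, headers=HEADERS)
#             paras = fromstring(r.content).xpath('//p')
#             text = extract_text(extract_paratext(paras))
#             if text:
#                 yield '"%s" %s' % (text, link)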
| """Main bot script - bot.py
For the DigitalOcean Tutorial.
"""
# Parts of the standard library
import random
import time
# Installed library
from lxml.html import fromstring
import nltk # Used here to split paragraphs into sentences during scraping.
nltk.download('punkt')
import requests # Used to get the HTML source of a web page to be scraped.
from twitter import OAuth, Twitter
# Local credentials file defined above
import credentials
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') # This line will be explained soon
oauth = OAuth(
credentials.ACCESS_TOKEN,
credentials.ACCESS_SECRET,
credentials.CONSUMER_KEY,
credentials.CONSUMER_SECRET
)
t = Twitter(auth=oauth)
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)'
' AppleWebKit/537.36 (KHTML, like Gecko) Cafari/537.36'
}
def extract_paratext(paras):
"""Extracts text from <p> elements and returns a clean, tokenized random
paragraph."""
paras = [para.text_content() for para in paras if para.text_content()]
para = random.choice(paras)
return tokenizer.tokenize(para)
def extract_text(para):
"""Returns a sufficiently-large random text from a tokenized paragraph,
if such text exists. Otherwise, returns None."""
for _ in range(10):
text = random.choice(para)
if text and 60 < len(text) < 210:
return text
return None
def scrape_coursera():
"""Scrapes content from the Coursera blog."""
url = 'https://blog.coursera.org'
r = requests.get(url, headers=HEADERS)
tree = fromstring(r.content)
links = tree.xpath('//div[@class="recent"]//div[@class="title"]/a/@href')
for link in links:
r = requests.get(link, headers=HEADERS)
blog_tree = fromstring(r.content)
paras = blog_tree.xpath('//div[@class="entry-content"]/p')
para = extract_paratext(paras) # Gets a random paragraph.
text = extract_text(para) # Gets a good-enough random text quote.
if not text:
continue
yield '"%s" %s' % (text, link) # This will be passed on to the code that composes our tweet.
def scrape_thenewstack():
"""Scrapes news from thenewstack.io"""
# For some, page may not be fetched without verify=False flag.
r = requests.get('https://thenewstack.io', verify=False)
tree = fromstring(r.content)
links = tree.xpath('//div[@class="normalstory-box"]/header/h2/a/@href')
for link in links:
r = requests.get(link, verify=False)
tree = fromstring(r.content)
paras = tree.xpath('//div[@class="post-content"]/p')
para = extract_paratext(paras)
text = extract_text(para) # Gets a good-enough random text quote.
if not text:
continue
yield '"%s" %s' % (text, link)
def main():
"""Encompasses the main loop of the bot."""
print('Bot started.')
news_funcs = ['scrape_coursera', 'scrape_thenewstack']
news_iterators = [] # A list for the scrapers we defined.
for func in news_funcs:
news_iterators.append(globals()[func]())
while True:
for i, iterator in enumerate(news_iterators):
try:
tweet = next(iterator)
t.statuses.update(status=tweet)
print(tweet, end='\n')
time.sleep(600) # Sleep for 10 minutes.
except StopIteration:
news_iterators[i] = globals()[newsfuncs[i]]()
if __name__ == "__main__": # This checks if the script is called directly.
main() | en | 0.78533 | Main bot script - bot.py For the DigitalOcean Tutorial. # Parts of the standard library # Installed library # Used here to split paragraphs into sentences during scraping. # Used to get the HTML source of a web page to be scraped. # Local credentials file defined above # This line will be explained soon Extracts text from <p> elements and returns a clean, tokenized random paragraph. Returns a sufficiently-large random text from a tokenized paragraph, if such text exists. Otherwise, returns None. Scrapes content from the Coursera blog. # Gets a random paragraph. # Gets a good-enough random text quote. # This will be passed on to the code that composes our tweet. Scrapes news from thenewstack.io # For some, page may not be fetched without verify=False flag. # Gets a good-enough random text quote. Encompasses the main loop of the bot. # A list for the scrapers we defined. # Sleep for 10 minutes. # This checks if the script is called directly. | 3.386288 | 3 |
scholars/utils/dynamic.py | shirishgoyal/scholars | 138 | 6632059 |
from rest_framework import serializers
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'fields' arg up to the superclass
fields = kwargs.pop('fields', None)
# Instantiate the superclass normally
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
if fields is not None:
# Drop any fields that are not specified in the `fields` argument.
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
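# --- Illustrative usage (not part of the original file) ---
# A concrete serializer opts in by inheriting from the mixin; callers can then
# pass `fields` to trim the output. The model and field names below are
# hypothetical.
#
#     class ScholarSerializer(DynamicFieldsModelSerializer):
#         class Meta:
#             model = Scholar
#             fields = ('id', 'name', 'email', 'created_at')
#
#     ScholarSerializer(instance)                         # all four fields
#     ScholarSerializer(instance, fields=('id', 'name'))  # only id and name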
geeksaga/archive/config.py | geekflow/archive | 0 | 6632060 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
.config
~~~~~~~~
:copyright: (c) 2014 by geeksaga.
:license: MIT LICENSE 2.0, see license for more details.
"""
import os
class Config(object):
SITE_ROOT = os.path.abspath(os.path.dirname(__file__))
DB_FILE_PATH= 'resource/database/archive.db'
DB_URL= 'sqlite:///' + os.path.join(SITE_ROOT, DB_FILE_PATH)
TMP_FOLDER = 'resource/tmp/'
UPLOAD_FOLDER = 'resource/upload/'
MAX_CONTENT_LENGTH = 10 * 1024 * 1024
PERMANENT_SESSION_LIFETIME = 60 * 60
SESSION_COOKIE_NAME = 'geeksaga_archive_session'
LOG_LEVEL = 'debug'
LOG_FILE_PATH = 'resource/log/archive.log'
DB_LOG_FLAG = 'True'
ARCHIVE_PATH = os.path.join(SITE_ROOT, 'data')
ARCHIVE_BACKUP_PATH = os.path.join(SITE_ROOT, 'data_backup')
    INDEX_PATH = os.path.join(SITE_ROOT, 'resource/whoosh_index')
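    # --- Illustrative usage (not part of the original file) ---
    # The attribute names (SESSION_COOKIE_NAME, MAX_CONTENT_LENGTH, ...) follow
    # Flask's configuration keys, so this class is presumably consumed with
    # something like:
    #
    #     app = Flask(__name__)
    #     app.config.from_object(Config)
    #
    # (a sketch; the actual application factory is not shown in this file).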
setup.py | Gorilla-Lab-SCUT/gorilla-3d | 6 | 6632061 | import os
import os.path as osp
import sys
from glob import glob
from setuptools import dist, setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
try:
import torch
from torch.utils.cpp_extension import CUDAExtension, CppExtension, BuildExtension
EXT_TYPE = "pytorch"
except ModuleNotFoundError:
from Cython.Distutils import build_ext as BuildExtension
print("Skip building ext ops due to the absence of torch.")
def get_requirements(filename="requirements.txt"):
assert osp.exists(filename), f"{filename} not exists"
with open(filename, "r") as f:
content = f.read()
lines = content.split("\n")
requirements_list = list(
filter(lambda x: x != "" and not x.startswith("#"), lines))
return requirements_list
def get_version():
version_file = osp.join("gorilla3d", "version.py")
with open(version_file, "r", encoding="utf-8") as f:
exec(compile(f.read(), version_file, "exec"))
return locals()["__version__"]
def get_sources(module, surfix="*.c*"):
src_dir = osp.join(*module.split("."), "src")
cuda_dir = osp.join(src_dir, "cuda")
cpu_dir = osp.join(src_dir, "cpu")
return glob(osp.join(src_dir, surfix)) + \
glob(osp.join(cuda_dir, surfix)) + \
glob(osp.join(cpu_dir, surfix))
def get_include_dir(module):
include_dir = osp.join(*module.split("."), "include")
if osp.exists(include_dir):
return [osp.abspath(include_dir)]
else:
return []
def make_extension(name, module):
if not torch.cuda.is_available(): return
    extension = CUDAExtension
    return extension(name=".".join([module, name]),
sources=get_sources(module),
include_dirs=get_include_dir(module),
extra_compile_args={
"cxx": ["-g"],
"nvcc": [
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
],
},
define_macros=[("WITH_CUDA", None)])
def get_extensions():
extensions = []
if torch.cuda.is_available():
extensions = [
make_extension(name="compiling_info",
module="gorilla3d.ops.utils"),
make_extension(name="ball_query_ext",
module="gorilla3d.ops.ball_query"),
make_extension(name="group_points_ext",
module="gorilla3d.ops.group_points"),
make_extension(name="interpolate_ext",
module="gorilla3d.ops.interpolate"),
make_extension(name="furthest_point_sample_ext",
module="gorilla3d.ops.furthest_point_sample"),
make_extension(name="gather_points_ext",
module="gorilla3d.ops.gather_points"),
make_extension(name="chamfer",
module="gorilla3d.ops.chamfer_distance"),
make_extension(name="sparse_interpolate_ext",
module="gorilla3d.ops.sparse_interpolate"),
]
return extensions
if __name__ == "__main__":
setup(name="gorilla3d",
version=get_version(),
author="<NAME>",
author_email="<EMAIL>",
description="3D vision library for Gorilla-Lab using PyTorch",
long_description=open("README.md").read(),
license="MIT",
install_requires=get_requirements(),
packages=find_packages(exclude=["tests"]),
ext_modules=get_extensions(),
cmdclass={"build_ext": BuildExtension},
zip_safe=False)
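# --- Illustrative usage (not part of the original file) ---
# With this setup.py, the package (including the CUDA extensions when
# torch.cuda.is_available()) is typically built and installed from the repo
# root with one of the standard setuptools workflows, e.g.:
#
#     pip install -e .         # editable/development install
#     python setup.py install  # legacy direct install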
books/migrations/0003_remove_book_pages_book_page_count_and_more.py | nixonsparrow/BookWorm | 1 | 6632062 | # Generated by Django 4.0 on 2022-01-04 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0002_book_author_book_cover_link_book_isbn_book_isbn_10_and_more'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='pages',
),
migrations.AddField(
model_name='book',
name='page_count',
field=models.IntegerField(blank=True, default=None, null=True, verbose_name='Pages'),
),
migrations.AlterField(
model_name='book',
name='cover_link',
field=models.CharField(blank=True, default='', max_length=150, null=True, verbose_name='Link to the cover'),
),
migrations.AlterField(
model_name='book',
name='isbn_10',
field=models.CharField(blank=True, default='', max_length=10, null=True, verbose_name='ISBN 10'),
),
migrations.AlterField(
model_name='book',
name='language',
field=models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='Language'),
),
migrations.AlterField(
model_name='book',
name='pub_date',
field=models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='Publication Date'),
),
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(default='', max_length=100, verbose_name='Title'),
),
]
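# --- Illustrative usage (not part of the original file) ---
# Auto-generated migrations like this one are applied with Django's management
# command, e.g.:
#
#     python manage.py migrate books
#
# and `python manage.py sqlmigrate books 0003` previews the SQL it will run.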
movies/views.py | vijay0707/REST-API-DRF | 0 | 6632063 | from django.shortcuts import render
from rest_framework import viewsets
from .serializers import MovieSerializer
from .models import Moviedata
# Create your views here.
class MovieViewSet(viewsets.ModelViewSet):
queryset = Moviedata.objects.all()
serializer_class = MovieSerializer
class ActionViewSet(viewsets.ModelViewSet):
queryset = Moviedata.objects.filter(typ='action')
serializer_class = MovieSerializer
class ComedyViewSet(viewsets.ModelViewSet):
queryset = Moviedata.objects.filter(typ='comedy')
    serializer_class = MovieSerializer
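# --- Illustrative wiring (not part of the original file) ---
# ModelViewSets are normally exposed through a DRF router in the project's
# urls.py; the route prefixes below are hypothetical.
#
#     from rest_framework import routers
#     from movies.views import MovieViewSet, ActionViewSet, ComedyViewSet
#
#     router = routers.DefaultRouter()
#     router.register(r'movies', MovieViewSet)
#     router.register(r'action', ActionViewSet, basename='action')
#     router.register(r'comedy', ComedyViewSet, basename='comedy')
#     urlpatterns = router.urls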
abtools/phylogeny/tree.py | menis/abtools | 0 | 6632064 | #!/usr/bin/python
# filename: tree.py
#
# Copyright (c) 2015 <NAME>
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import subprocess as sp
import ete3
def make_tree(alignment, timepoints, delimiter, is_aa, scale, branch_vert_margin,
fontsize, show_name, tree_orientation, show_scale=False):
'''
Builds a tree file (using FastTree) from a sequence alignment in FASTA format
Input
path to a FASTA-formatted sequence alignment
Output
path to a Newick-formatted tree file
'''
tree = alignment.replace('_aligned.aln', '_tree.nw')
tree = fast_tree(alignment, tree, is_aa)
make_figure(tree, timepoints, delimiter, scale, branch_vert_margin,
fontsize, show_name, tree_orientation, show_scale=show_scale)
def fast_tree(alignment, tree, is_aa, show_output=False):
if is_aa:
ft_cmd = 'fasttree {} > {}'.format(alignment, tree)
else:
ft_cmd = 'fasttree -nt {} > {}'.format(alignment, tree)
ft = sp.Popen(ft_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = ft.communicate()
if show_output:
print(ft_cmd)
print(stdout)
print(stderr)
return tree
def make_figure(tree, timepoints, delimiter, scale, branch_vert_margin,
fontsize, show_name, tree_orientation, show_scale=False):
fig = tree.replace('_tree.nw', '_tree.pdf')
orders = {tp.name: tp.order for tp in timepoints}
colors = {tp.name: tp.color for tp in timepoints}
# settins for name showing
if show_name == 'none':
show_name = []
if show_name == 'all':
show_name = ['mab', 'root', 'input']
elif show_name == 'no-root':
show_name = ['input', 'mab']
    elif isinstance(show_name, str):
show_name = [show_name, ]
# make the tree
    t = ete3.Tree(tree)
t.set_outgroup(t&"root")
# style the nodes based on timepoint
for node in t.traverse():
earliest = get_earliest_leaf(node.get_leaf_names(), orders, delimiter)
color = colors[earliest]
node_type = get_node_type(node.name)
        style = ete3.NodeStyle()
style['size'] = 0
style['vt_line_width'] = 1.0
style['hz_line_width'] = 1.0
style['vt_line_color'] = color
style['hz_line_color'] = color
style['vt_line_type'] = 0
style['hz_line_type'] = 0
if node_type in show_name:
if node_type in ['mab', 'input']:
name = ' ' + delimiter.join(node.name.split(delimiter)[1:])
else:
name = ' ' + node.name
            tf = ete3.TextFace(name)
tf.fsize = fontsize
node.add_face(tf, column=0)
style['fgcolor'] = '#000000'
node.set_style(style)
# style the full tree
# root = (t&"root")
# nearest_to_root, distance = root.get_closest_leaf()
# root_node = t.get_common_ancestor(root, nearest_to_root)
t.dist = 0
    ts = ete3.TreeStyle()
ts.orientation = tree_orientation
ts.show_leaf_name = False
if scale:
ts.scale = int(scale)
if branch_vert_margin:
ts.branch_vertical_margin = float(branch_vert_margin)
ts.show_scale = False
# ladderize
t.ladderize()
# render the tree
t.render(fig, tree_style=ts)
def get_node_type(node_name):
if node_name == 'root':
return 'root'
if node_name.startswith('mab'):
return 'mab'
if node_name == 'NoName':
return 'inner'
return 'input'
def get_earliest_leaf(leaves, order, delimiter):
counts = {}
for leaf in leaves:
tp = leaf.split(delimiter)[0]
counts[tp] = counts[tp] + 1 if tp in counts else 1
total = sum(counts.values())
if 'root' in counts:
return 'root'
timepoints = sorted(counts.keys(), key=lambda x: order[x])
for tp in timepoints:
if 100. * counts[tp] / total >= 5:
return tp
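# --- Illustrative usage (not part of the original module) ---
# make_tree() expects timepoint objects exposing .name, .order and .color and
# an alignment path ending in '_aligned.aln'. A minimal stand-in (hypothetical
# values throughout):
#
#     from collections import namedtuple
#     Timepoint = namedtuple('Timepoint', ['name', 'order', 'color'])
#     timepoints = [Timepoint('wk04', 0, '#1f77b4'), Timepoint('wk12', 1, '#ff7f0e')]
#     make_tree('lineage1_aligned.aln', timepoints, delimiter='_', is_aa=False,
#               scale=40, branch_vert_margin=2, fontsize=8,
#               show_name='no-root', tree_orientation=0)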
test/testLine.py | kholohan/ud953-linear-algebra | 0 | 6632065 |
# -*- coding: utf-8 -*-
# Author: github.com/kholohan
import unittest
from vector import Vector
from line import Line
class LineTest(unittest.TestCase):
def test_is_equal(self):
# line 1: 4.046x + 2.836y = 1.21
# line 2: 10.115x + 7.09y = 3.025
line1 = Line(Vector([4.046, 2.836]), 1.21)
line2 = Line(Vector([10.115, 7.09]), 3.025)
result = line1 == line2
self.assertTrue(result)
def test_is_equal2(self):
# line 1: 7.204x + 3.182y = 8.68
# line 2: 8.172x + 4.114y = 9.883
line1 = Line(Vector([7.204, 3.182]), 8.68)
line2 = Line(Vector([8.172, 4.114]), 9.883)
result = line1 == line2
self.assertFalse(result)
def test_is_equal3(self):
# line 1: 1.182x + 5.562y = 6.744
# line 2: 1.773x + 8.343y = 9.525
line1 = Line(Vector([1.182, 5.562]), 6.744)
line2 = Line(Vector([1.773, 8.343]), 9.525)
result = line1 == line2
self.assertFalse(result)
def test_is_parallel1(self):
# line 1: 4.046x + 2.836y = 1.21
# line 2: 10.115x + 7.09y = 3.025
line1 = Line(Vector([4.046, 2.836]), 1.21)
line2 = Line(Vector([10.115, 7.09]), 3.025)
result = line1.is_parallel(line2)
self.assertTrue(result)
def test_is_parallel2(self):
# line 1: 7.204x + 3.182y = 8.68
# line 2: 8.172x + 4.114y = 9.883
line1 = Line(Vector([7.204, 3.182]), 8.68)
line2 = Line(Vector([8.172, 4.114]), 9.883)
result = line1.is_parallel(line2)
self.assertFalse(result)
def test_is_parallel3(self):
# line 1: 1.182x + 5.562y = 6.744
# line 2: 1.773x + 8.343y = 9.525
line1 = Line(Vector([1.182, 5.562]), 6.744)
line2 = Line(Vector([1.773, 8.343]), 9.525)
result = line1.is_parallel(line2)
self.assertTrue(result)
def test_intersection(self):
# line 1: 4.046x + 2.836y = 1.21
# line 2: 10.115x + 7.09y = 3.025
line1 = Line(Vector([4.046, 2.836]), 1.21)
line2 = Line(Vector([10.115, 7.09]), 3.025)
result = line1.intersection(line2)
self.assertEquals(result, line1.normal_vector)
def test_intersection2(self):
# line 1: 7.204x + 3.182y = 8.68
# line 2: 8.172x + 4.114y = 9.883
line1 = Line(Vector([7.204, 3.182]), 8.68)
line2 = Line(Vector([8.172, 4.114]), 9.883)
result = line1.intersection(line2)
answer = Vector([1.1727766354646414, 0.07269551166333184])
self.assertEquals(result, answer)
def test_intersection3(self):
# line 1: 1.182x + 5.562y = 6.744
# line 2: 1.773x + 8.343y = 9.525
line1 = Line(Vector([1.182, 5.562]), 6.744)
line2 = Line(Vector([1.773, 8.343]), 9.525)
result = line1.intersection(line2)
self.assertFalse(result)
def test_equals(self):
# line 1: 4.046x + 2.836y = 1.21
# line 2: 10.115x + 7.09y = 3.025
line1 = Line(Vector([4.046, 2.836]), 1.21)
line2 = Line(Vector([10.115, 7.09]), 3.025)
result = line1 == line2
self.assertTrue(result)
def test_equals2(self):
# line 1: 7.204x + 3.182y = 8.68
# line 2: 8.172x + 4.114y = 9.883
line1 = Line(Vector([7.204, 3.182]), 8.68)
line2 = Line(Vector([8.172, 4.114]), 9.883)
result = line1 == line2
self.assertFalse(result)
def test_equals3(self):
# line 1: 1.182x + 5.562y = 6.744
# line 2: 1.773x + 8.343y = 9.525
line1 = Line(Vector([1.182, 5.562]), 6.744)
line2 = Line(Vector([1.773, 8.343]), 9.525)
result = line1 == line2
        self.assertFalse(result)
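# --- Illustrative addition (not part of the original file) ---
# The module defines the test cases but no runner; with the standard guard
# below it can be executed directly (python test/testLine.py), or without it
# via `python -m unittest test.testLine`.
if __name__ == '__main__':
    unittest.main()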
python/tvm/relay/op/image/_image.py | shengxinhu/tvm | 4,640 | 6632066 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm.te.hybrid import script
from tvm.runtime import convert
from tvm import topi
from tvm.topi.utils import get_const_tuple
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .image import resize1d, resize2d
# resize
@reg.register_compute("image.resize1d")
def compute_resize1d(attrs, inputs, out_type):
"""compute definition for resize1d op"""
size = attrs.size
roi = attrs.roi
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.resize1d(
inputs[0],
roi,
size,
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.resize1d")
@reg.register_convert_op_layout("image.resize1d")
def convert_image_resize1d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for image resize1d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current resize op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data input.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
assert len(desired_layouts) == 1, "Only one desired layout is expected"
desired_layout = str(desired_layouts[0])
assert desired_layout != "default", "Layout cannot be default"
new_attrs["layout"] = desired_layout
return resize1d(*inputs, **new_attrs)
@script
def _resize1d_shape_func(image_shape, size, batch_axis, width_axis, channel_axis):
out = output_tensor((3,), "int64")
out[batch_axis] = int64(image_shape[0])
    out[width_axis] = int64(size[0])
out[channel_axis] = image_shape[channel_axis]
return out
@reg.register_shape_func("image.resize1d", False)
def resize1d_shape_func(attrs, inputs, _):
"""
    Shape function for resize1d op.
"""
layout = attrs.layout
width_axis = channel_axis = 1
for i, letter in enumerate(layout):
if letter == "N":
batch_axis = i
if letter == "W":
width_axis = i
if letter == "C":
channel_axis = i
size = get_const_tuple(attrs.size)
return [
_resize1d_shape_func(
inputs[0],
convert(size),
convert(batch_axis),
convert(width_axis),
convert(channel_axis),
)
]
@reg.register_compute("image.resize2d")
def compute_resize2d(attrs, inputs, out_type):
"""compute definition for resize2d op"""
size = attrs.size
roi = attrs.roi
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.resize2d(
inputs[0],
roi,
size,
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.resize2d")
@reg.register_convert_op_layout("image.resize2d")
def convert_image_resize2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for image resize2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current resize op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data input.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
assert len(desired_layouts) == 1, "Only one desired layout is expected"
desired_layout = str(desired_layouts[0])
assert desired_layout != "default", "Layout cannot be default"
new_attrs["layout"] = desired_layout
return resize2d(*inputs, **new_attrs)
@script
def _resize2d_shape_func(image_shape, size, batch_axis, height_axis, width_axis, channel_axis):
out = output_tensor((4,), "int64")
out[batch_axis] = int64(image_shape[0])
out[height_axis] = int64(size[0])
out[width_axis] = int64(size[1])
out[channel_axis] = image_shape[channel_axis]
return out
@reg.register_shape_func("image.resize2d", False)
def resize2d_shape_func(attrs, inputs, _):
"""
Shape function for resize2d op.
"""
layout = attrs.layout
height_axis = width_axis = channel_axis = 1
for i, letter in enumerate(layout):
if letter == "N":
batch_axis = i
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
if letter == "C":
channel_axis = i
size = get_const_tuple(attrs.size)
return [
_resize2d_shape_func(
inputs[0],
convert(size),
convert(batch_axis),
convert(height_axis),
convert(width_axis),
convert(channel_axis),
)
]
@reg.register_compute("image.resize3d")
def compute_resize3d(attrs, inputs, out_type):
"""compute definition for resize3d op"""
size = attrs.size
roi = attrs.roi
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.resize3d(
inputs[0],
roi,
size,
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.resize3d")
# crop and resize
@reg.register_compute("image.crop_and_resize")
def compute_crop_and_resize(attrs, inputs, out_type):
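    """compute definition for crop_and_resize op"""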
crop_size = attrs.crop_size
layout = attrs.layout
method = attrs.method
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.crop_and_resize(
inputs[0],
inputs[1],
inputs[2],
crop_size,
layout,
method,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.crop_and_resize")
@script
def _crop_and_resize_func(
image_shape, boxes_shape, crop_size, height_axis, width_axis, channel_axis
):
out = output_tensor((4,), "int64")
out[0] = boxes_shape[0]
out[height_axis] = int64(crop_size[0])
out[width_axis] = int64(crop_size[1])
out[channel_axis] = image_shape[channel_axis]
return out
@reg.register_shape_func("image.crop_and_resize", False)
def crop_and_resize_func(attrs, inputs, _):
"""
Shape function for crop_and_resize op.
"""
layout = attrs.layout
height_axis = width_axis = channel_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
if letter == "C":
channel_axis = i
crop_size = get_const_tuple(attrs.crop_size)
return [
_crop_and_resize_func(
inputs[0],
inputs[1],
convert(crop_size),
convert(height_axis),
convert(width_axis),
convert(channel_axis),
)
]
# dilation2d
reg.register_strategy("image.dilation2d", strategy.dilation2d_strategy)
reg.register_pattern("image.dilation2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# affine_grid
@reg.register_compute("image.affine_grid")
def compute_affine_grid(attrs, inputs, out_dtype):
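    """compute definition for affine_grid op"""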
target_shape = get_const_tuple(attrs.target_shape)
return [topi.image.affine_grid(inputs[0], target_shape)]
reg.register_injective_schedule("image.affine_grid")
@script
def _affine_grid_func(data, target_shape):
out = output_tensor((4,), "int64")
out[0] = int64(data[0])
out[1] = int64(2)
out[2] = int64(target_shape[0])
out[3] = int64(target_shape[1])
return out
@reg.register_shape_func("image.affine_grid", False)
def affine_grid_func(attrs, inputs, _):
"""
Shape function for affine_grid op.
"""
target_shape = get_const_tuple(attrs.target_shape)
return [_affine_grid_func(inputs[0], convert(target_shape))]
# grid_sample
@reg.register_compute("image.grid_sample")
def compute_grid_sample(attrs, inputs, out_dtype):
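    """compute definition for grid_sample op"""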
method = attrs.method
layout = attrs.layout
padding_mode = attrs.padding_mode
align_corners = attrs.align_corners
return [
topi.image.grid_sample(inputs[0], inputs[1], method, layout, padding_mode, align_corners)
]
reg.register_injective_schedule("image.grid_sample")
@script
def _grid_sample_func_nchw(data, grid):
out = output_tensor((4,), "int64")
out[0] = int64(data[0])
out[1] = int64(data[1])
out[2] = int64(grid[2])
out[3] = int64(grid[3])
return out
@script
def _grid_sample_func_ncdhw(data, grid):
out = output_tensor((5,), "int64")
out[0] = int64(data[0])
out[1] = int64(data[1])
out[2] = int64(grid[2])
out[3] = int64(grid[3])
out[4] = int64(grid[4])
return out
@reg.register_shape_func("image.grid_sample", False)
def grid_sample_func(attrs, inputs, _):
"""
Shape function for grid_sample op.
"""
if attrs.layout == "NCHW":
script_func = _grid_sample_func_nchw
elif attrs.layout == "NCDHW":
script_func = _grid_sample_func_ncdhw
else:
msg = f"layout {attrs.layout} is not supported"
raise ValueError(msg)
    return [script_func(inputs[0], inputs[1])]
src/tools/tool.py | zeqiufan/Distributional-Signatures | 1 | 6632067 | <reponame>zeqiufan/Distributional-Signatures
import argparse
import torch
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(
description="Few Shot Text Classification with Distributional Signatures")
parser.add_argument("--data_path", type=str,
default="data/reuters.json",
help="path to dataset")
parser.add_argument("--dataset", type=str, default="reuters",
help="name of the dataset. "
"Options: [20newsgroup, amazon, huffpost, "
"reuters, rcv1, fewrel]")
parser.add_argument("--n_train_class", type=int, default=15,
help="number of meta-train classes")
parser.add_argument("--n_val_class", type=int, default=5,
help="number of meta-val classes")
parser.add_argument("--n_test_class", type=int, default=11,
help="number of meta-test classes")
parser.add_argument("--n_workers", type=int, default=10,
help="Num. of cores used for loading data. Set this "
"to zero if you want to use all the cpus.")
parser.add_argument("--way", type=int, default=5,
help="#classes for each task")
parser.add_argument("--shot", type=int, default=5,
help="#support examples for each class for each task")
parser.add_argument("--query", type=int, default=25,
help="#query examples for each class for each task")
parser.add_argument("--train_epochs", type=int, default=1000,
help="max num of training epochs")
parser.add_argument("--train_episodes", type=int, default=100,
help="#tasks sampled during each training epoch")
parser.add_argument("--val_episodes", type=int, default=100,
                        help="#tasks sampled during each validation epoch")
parser.add_argument("--test_episodes", type=int, default=1000,
help="#tasks sampled during each testing epoch")
parser.add_argument("--wv_path", type=str,
default='../pretrain_wordvec',
help="path to word vector cache")
parser.add_argument("--word_vector", type=str, default='../pretrain_wordvec/wiki.en.vec',
help=("Name of pretrained word embeddings."))
parser.add_argument("--finetune_ebd", action="store_true", default=False,
help=("Finetune embedding during meta-training"))
parser.add_argument("--embedding", type=str, default="mlada",
help=("document embedding method."))
parser.add_argument("--classifier", type=str, default="r2d2",
help=("classifier."))
parser.add_argument("--auxiliary", type=str, nargs="*", default=[],
help=("auxiliary embeddings (used for fewrel)."))
parser.add_argument("--seed", type=int, default=330, help="seed")
parser.add_argument("--dropout", type=float, default=0.1, help="drop rate")
parser.add_argument("--patience", type=int, default=20, help="patience")
parser.add_argument("--clip_grad", type=float, default=None,
help="gradient clipping")
parser.add_argument("--cuda", type=int, default=-1,
help="cuda device, -1 for cpu")
parser.add_argument("--mode", type=str, default="test",
help=("Running mode."
"Options: [train, test]"
"[Default: test]"))
parser.add_argument("--save", action="store_true", default=False,
help="train the model")
parser.add_argument("--notqdm", action="store_true", default=False,
help="disable tqdm")
parser.add_argument("--result_path", type=str, default="")
parser.add_argument("--snapshot", type=str, default="",
                        help="path to the pretrained weights")
parser.add_argument("--pretrain", type=str, default=None, help="path to the pretraiend weights for MLADA")
parser.add_argument("--k", type=int, default=None, help="Number of iterations of the adversarial network")
parser.add_argument("--lr_g", type=float, default=1e-3, help="learning rate of G")
parser.add_argument("--lr_d", type=float, default=1e-3, help="learning rate of D")
parser.add_argument("--lr_scheduler", type=str, default=None, help="lr_scheduler")
parser.add_argument("--ExponentialLR_gamma", type=float, default=0.98, help="ExponentialLR_gamma")
parser.add_argument("--train_mode", type=str, default=None, help="you can choose t_add_v or None")
parser.add_argument("--ablation", type=str, default="", help="ablation study:[-DAN, -IL]")
parser.add_argument("--path_drawn_data", type=str, default="reuters_False_data.json", help="path_drawn_data")
parser.add_argument("--Comments", type=str, default="", help="Comments")
parser.add_argument("--id2word", default=None, help="id2word")
return parser.parse_args()
def print_args(args):
"""
Print arguments (only show the relevant arguments)
"""
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
print("\t{}={}".format(attr.upper(), value))
print("""
.---. _______
__ __ ___ | | \ ___ `'.
| |/ `.' `. | | ' |--.\ \
| .-. .-. '| | | | \ '
| | | | | || | __ | | | ' __
| | | | | || | .:--.'. | | | | .:--.'.
| | | | | || |/ | \ | | | ' .'/ | \ |
| | | | | || |`" __ | | | |___.' /' `" __ | |
|__| |__| |__|| | .'.''| | /_______.'/ .'.''| |
'---'/ / | |_\_______|/ / / | |_
\ \._,\ '/ \ \._,\ '/
`--' `" `--' `"
""")
def set_seed(seed):
"""
Setting random seeds
"""
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
def load_model_state_dict(model, model_path):
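    """Load the checkpoint at model_path into model, copying tensors in order where shapes match."""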
model_dict = model.state_dict()
pretrained_dict = torch.load(model_path)
keys = []
for k, v in pretrained_dict.items():
keys.append(k)
i = 0
print("_____________pretrain_parameters______________________________")
for k, v in model_dict.items():
if v.size() == pretrained_dict[keys[i]].size():
model_dict[k] = pretrained_dict[keys[i]]
print(model_dict[k])
i = i + 1
# print(model_dict[k])
print("___________________________________________________________")
model.load_state_dict(model_dict)
    return model
Artifacts/Results/plot_verus_losses.py | comnetsAD/ALCC | 6 | 6632068 | #-----------------------------Plot Run Commands------------------------------------#
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
from glob import glob
import pandas as pd
import seaborn as sns
import math
import matplotlib.gridspec as gridspec
from matplotlib.patches import Ellipse
plt.rcParams['text.latex.preamble']=[r'\boldmath']
params = {
'font.size' : 40,
'legend.fontsize': 30,
'text.latex.unicode': True,
}
plt.rcParams.update(params)
plt.rcParams['ytick.labelsize'] = 40
plt.rcParams['xtick.labelsize'] = 40
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
def scale(a):
return a/1000000.0
def parse_throughput(filename):
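    """Bucket packet sizes into 1-second bins; return (per-second throughput in Mbps, bin start times)."""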
times = []
pktsize = []
throughput_file = open(filename,"r")
if '/verus/' in filename:
tokens = throughput_file.readline().strip().split(",")
else:
tokens = throughput_file.readline().strip().split()
sTime = float(tokens[0])
firstTime = sTime
bucket = []
if '/verus/' in filename:
bucket.append(1500)
else:
bucket.append(float(tokens[1]))
for line in throughput_file:
if '/verus/' in filename:
tokens = line.strip().split(",")
else:
tokens = line.strip().split()
if float(tokens[0])< sTime+1.0:
if '/verus/' in filename:
bucket.append(1500)
else:
bucket.append(float(tokens[1]))
else:
pktsize.append(sum(bucket)*8/1000000.0)
bucket = []
times.append(sTime-firstTime)
while float(tokens[0])-sTime > 1.0:
sTime += 1.0
if sTime - firstTime > float(sys.argv[2]):
break
throughput_file.close()
return pktsize, times
def parse_delay_alccVerus(filename):
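    """Parse an alccVerus delay log; return (delays converted to ms, timestamps)."""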
delays = []
times = []
cnt = 0
delay_file = open(filename,"r")
tokens = delay_file.readline().strip().split()
sTime = float(tokens[0])
delays.append((float(tokens[1])*1000.0))
times.append((float(tokens[0])))
for line in delay_file:
tokens = line.strip().split()
# if float(tokens[1]) < 10000.0:
delays.append((float(tokens[1])*1000))
times.append((float(tokens[0])-sTime))
delay_file.close()
return delays, times
def parse_delay(filename):
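    """Parse Receiver*.out delay logs (delay in the third comma-separated field), following numbered continuation files; return (delays, times)."""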
delays = []
times = []
cnt = 0
delay_file = open(filename,"r")
tokens = delay_file.readline().strip().split(",")
sTime = float(tokens[0])
for line in delay_file:
tokens = line.strip().split(",")
if '/verus/' in filename:
pass
elif 'KERNEL' in line or len(tokens) != 6 or float(tokens[2]) < 20:
continue
if (float(tokens[0])-sTime) < float(sys.argv[2]):
delays.append((float(tokens[2])))
times.append((float(tokens[0])-sTime))
delay_file.close()
lastTime = times[-1]
if "0" in filename:
        print("---------------------")
cnt = 1
file = filename.split("0")
file = file[0]+str(cnt)+file[1]
        print(file)
while os.path.isfile(file):
            print(file)
delay_file = open(file,"r")
tokens = delay_file.readline().strip().split(",")
sTime = float(tokens[0])
for line in delay_file:
tokens = line.strip().split(",")
if '/verus/' in filename:
pass
elif 'KERNEL' in line or len(tokens) != 6 or float(tokens[2]) < 20:
continue
if (float(tokens[0])-sTime) < float(sys.argv[2]):
delays.append((float(tokens[2])))
times.append((float(tokens[0])-sTime)+lastTime)
cnt+=1
file = filename.split("0")
file = file[0]+str(cnt)+file[1]
lastTime = times[-1]
delay_file.close()
return delays, times
def simple_cdf(data):
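    """Return the 25th, 50th, 75th and 95th percentiles of data via its empirical CDF."""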
data_sorted = np.sort(data)
# calculate the proportional values of samples
cdf = 1. * np.arange(len(data)) / (len(data) - 1)
tmp = []
for k in range(len(cdf)):
if cdf[k] >= 0.25:
tmp.append(data_sorted[k])
break
for k in range(len(cdf)):
if cdf[k] >= 0.5:
tmp.append(data_sorted[k])
break
for k in range(len(cdf)):
if cdf[k] >= 0.75:
tmp.append(data_sorted[k])
break
for k in range(len(cdf)):
if cdf[k] >= 0.95:
tmp.append(data_sorted[k])
break
return tmp
for trace in [sys.argv[1]]:
labels = ["alccVerusCubic","alccVerusCubicNL", "verus"]
delays1 = []
delayTimes1 = []
delays2 = []
delayTimes2 = []
delays3 = []
delayTimes3 = []
throughputDL1 = []
timeDL1 = []
throughputDL2 = []
timeDL2 = []
throughputDL3=[]
timeDL3=[]
if True:
for algo in labels:
if "alccVerusCubic" == algo:
os.system("tshark -r ./Verusloss/{0}/{1}/log.pcap -T fields -e frame.time_epoch -e frame.len 'tcp.srcport==60001' > ./Verusloss/{0}/{1}/throughput.csv 2> /dev/null".format(algo,trace))
throughputDL1, timeDL1 = parse_throughput("./Verusloss/{0}/{1}/throughput.csv".format(algo,trace))
delays1, delayTimes1 = parse_delay("./Verusloss/{0}/{1}/".format(algo,trace)+"Receiver0.out")
elif "alccVerusCubicNL" == algo:
print (algo)
os.system("tshark -r ./Verusloss/{0}/{1}/log.pcap -T fields -e frame.time_epoch -e frame.len 'tcp.srcport==60001' > ./Verusloss/{0}/{1}/throughput.csv 2> /dev/null".format(algo,trace))
throughputDL2, timeDL2 = parse_throughput("./Verusloss/{0}/{1}/throughput.csv".format(algo,trace))
print(timeDL2)
print(throughputDL2[:100])
delays2, delayTimes2 = parse_delay("./Verusloss/{0}/{1}/".format(algo,trace)+"Receiver0.out")
else:
throughputDL3, timeDL3 = parse_throughput("./Verusloss/{0}/{1}/client_60001.out".format(algo,trace))
delays3, delayTimes3 = parse_delay("./Verusloss/{0}/{1}/".format(algo,trace)+"Receiver.out")
sns.set_style("white")
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(18,10), facecolor='w', sharex=True)
ax2.plot(delayTimes1, delays1, color="g", lw=5)
ax2.plot(delayTimes2, delays2, color="#ff0000", lw=5)
ax2.plot(delayTimes3, delays3, color="#0000ff", lw=5)
ax2.set_xlim([0,250])
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Delay (ms)')
ax2.set_yscale('log',basey=10)
ax2.grid(True, which="both")
        f1 = open("../../channelTraces/" + sys.argv[1], "r")
BW = []
nextTime = 2900
cnt = 0
for line in f1:
if int(line.strip()) > nextTime:
BW.append(cnt*1492*8)
cnt = 0
nextTime+=1000
else:
cnt+=1
f1.close()
ax1.fill_between(range(len(BW)), 0, list(map(scale,BW)),color='#D3D3D3')
p1, = ax1.plot(timeDL2, throughputDL2, color="#ff0000", lw=5, label='alccVerus (No loss)')
p2, = ax1.plot(timeDL1, throughputDL1, color="g", lw=5, label='alccVerus (loss=1%)')
p3, = ax1.plot(timeDL3, throughputDL3, color="#0000ff", lw=5, label='verus (loss=1%)')
ax1.set_ylabel("Throughput\n(Mbps)")
ax1.set_xlabel("Time (s)")
ax1.set_ylim([0,50])
fig.legend((p1,p2,p3),(p1.get_label(),p2.get_label(),p3.get_label()),ncol=3,loc="upper center")
plt.subplots_adjust(top=0.85)
if not os.path.exists('figures'):
os.makedirs('figures')
fig.savefig('./figures/verusloss-{0}.png'.format(sys.argv[1]),bbox_inches='tight')
plt.close(fig)
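# Assumed invocation, inferred from the sys.argv usage above
# (argv[1] = channel trace name under ../../channelTraces/, argv[2] = time window in seconds):
#   python plot_verus_losses.py <trace_name> <seconds>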
lib/model/nms/seq_nms.py | YeLyuUT/FastVOD | 1 | 6632069 | # --------------------------------------------------------
# Flow-Guided Feature Aggregation
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by <NAME>, <NAME>
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import numpy as np
import profile
import cv2
import time
import copy
import cPickle as pickle
import os
import pysnooper
CLASSES = ('__background__',
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',
'car', 'cattle', 'dog', 'domestic cat', 'elephant', 'fox',
'giant panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',
'motorcycle', 'rabbit', 'red panda', 'sheep', 'snake', 'squirrel',
'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra')
NMS_THRESH = 0.3
IOU_THRESH = 0.5
MAX_THRESH=1e-2
def createLinks(dets_all):
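    """For each class, link detections in frame t to detections in frame t+1 with IoU >= IOU_THRESH."""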
links_all = []
frame_num = len(dets_all[0])
cls_num = len(CLASSES) - 1
for cls_ind in range(cls_num):
links_cls = []
for frame_ind in range(frame_num - 1):
dets1 = dets_all[cls_ind][frame_ind]
dets2 = dets_all[cls_ind][frame_ind + 1]
box1_num = len(dets1)
box2_num = len(dets2)
if frame_ind == 0:
areas1 = np.empty(box1_num)
for box1_ind, box1 in enumerate(dets1):
areas1[box1_ind] = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
else:
areas1 = areas2
areas2 = np.empty(box2_num)
for box2_ind, box2 in enumerate(dets2):
areas2[box2_ind] = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
links_frame = []
for box1_ind, box1 in enumerate(dets1):
area1 = areas1[box1_ind]
x1 = np.maximum(box1[0], dets2[:, 0])
y1 = np.maximum(box1[1], dets2[:, 1])
x2 = np.minimum(box1[2], dets2[:, 2])
y2 = np.minimum(box1[3], dets2[:, 3])
w = np.maximum(0.0, x2 - x1 + 1)
h = np.maximum(0.0, y2 - y1 + 1)
inter = w * h
ovrs = inter / (area1 + areas2 - inter)
links_box = [ovr_ind for ovr_ind, ovr in enumerate(ovrs) if
ovr >= IOU_THRESH]
links_frame.append(links_box)
links_cls.append(links_frame)
links_all.append(links_cls)
return links_all
def maxPath(dets_all, links_all):
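    """Repeatedly extract the best-scoring temporal path per class, rescore its boxes, and suppress overlapping detections."""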
for cls_ind, links_cls in enumerate(links_all):
max_begin = time.time()
delete_sets=[[]for i in range(0,len(dets_all[0]))]
delete_single_box=[]
dets_cls = dets_all[cls_ind]
num_path=0
# compute the number of links
sum_links=0
for frame_ind, frame in enumerate(links_cls):
for box_ind,box in enumerate(frame):
sum_links+=len(box)
while True:
num_path+=1
rootindex, maxpath, maxsum = findMaxPath(links_cls, dets_cls,delete_single_box)
if (maxsum<MAX_THRESH or sum_links==0 or len(maxpath) <1):
break
if (len(maxpath)==1):
delete=[rootindex,maxpath[0]]
delete_single_box.append(delete)
rescore(dets_cls, rootindex, maxpath, maxsum)
t4=time.time()
delete_set,num_delete=deleteLink(dets_cls, links_cls, rootindex, maxpath, NMS_THRESH)
sum_links-=num_delete
for i, box_ind in enumerate(maxpath):
delete_set[i].remove(box_ind)
delete_single_box.append([[rootindex+i],box_ind])
for j in delete_set[i]:
dets_cls[i+rootindex][j]=np.zeros(5)
delete_sets[i+rootindex]=delete_sets[i+rootindex]+delete_set[i]
for frame_idx,frame in enumerate(dets_all[cls_ind]):
a=range(0,len(frame))
keep=list(set(a).difference(set(delete_sets[frame_idx])))
dets_all[cls_ind][frame_idx]=frame[keep,:]
return dets_all
def findMaxPath(links,dets,delete_single_box):
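    """Dynamic programming over the links: return (start frame index, box indices along the best path, path score)."""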
len_dets=[len(dets[i]) for i in xrange(len(dets))]
max_boxes=np.max(len_dets)
num_frame=len(links)+1
a=np.zeros([num_frame,max_boxes])
new_dets=np.zeros([num_frame,max_boxes])
for delete_box in delete_single_box:
new_dets[delete_box[0],delete_box[1]]=1
if(max_boxes==0):
max_path=[]
return 0,max_path,0
b=np.full((num_frame,max_boxes),-1)
for l in xrange(len(dets)):
for j in xrange(len(dets[l])):
if(new_dets[l,j]==0):
a[l,j]=dets[l][j][-1]
for i in xrange(1,num_frame):
l1=i-1;
for box_id,box in enumerate(links[l1]):
for next_box_id in box:
weight_new=a[i-1,box_id]+dets[i][next_box_id][-1]
if(weight_new>a[i,next_box_id]):
a[i,next_box_id]=weight_new
b[i,next_box_id]=box_id
i,j=np.unravel_index(a.argmax(),a.shape)
maxpath=[j]
maxscore=a[i,j]
while(b[i,j]!=-1):
maxpath.append(b[i,j])
j=b[i,j]
i=i-1
rootindex=i
maxpath.reverse()
return rootindex, maxpath, maxscore
def rescore(dets, rootindex, maxpath, maxsum):
newscore = maxsum / len(maxpath)
for i, box_ind in enumerate(maxpath):
dets[rootindex + i][box_ind][4] = newscore
def deleteLink(dets, links, rootindex, maxpath, thesh):
delete_set=[]
num_delete_links=0
for i, box_ind in enumerate(maxpath):
areas = [(box[2] - box[0] + 1) * (box[3] - box[1] + 1) for box in dets[rootindex + i]]
area1 = areas[box_ind]
box1 = dets[rootindex + i][box_ind]
x1 = np.maximum(box1[0], dets[rootindex + i][:, 0])
y1 = np.maximum(box1[1], dets[rootindex + i][:, 1])
x2 = np.minimum(box1[2], dets[rootindex + i][:, 2])
y2 = np.minimum(box1[3], dets[rootindex + i][:, 3])
w = np.maximum(0.0, x2 - x1 + 1)
h = np.maximum(0.0, y2 - y1 + 1)
inter = w * h
ovrs = inter / (area1 + areas - inter)
#saving the box need to delete
deletes = [ovr_ind for ovr_ind, ovr in enumerate(ovrs) if ovr >= 0.3]
delete_set.append(deletes)
#delete the links except for the last frame
if rootindex + i < len(links):
for delete_ind in deletes:
num_delete_links+=len(links[rootindex+i][delete_ind])
links[rootindex + i][delete_ind] = []
if i > 0 or rootindex > 0:
#delete the links which point to box_ind
for priorbox in links[rootindex + i - 1]:
for delete_ind in deletes:
if delete_ind in priorbox:
priorbox.remove(delete_ind)
num_delete_links+=1
return delete_set,num_delete_links
def seq_nms(dets):
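    """Seq-NMS entry point: dets is a [class][frame] list of (N, 5) arrays of [x1, y1, x2, y2, score]."""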
links = createLinks(dets)
dets=maxPath(dets, links)
return dets
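# Illustrative usage sketch (not part of the original file); the dummy boxes below are
# assumptions, shaped the way seq_nms expects (dets[class][frame] -> (N, 5) float array):
#   num_cls = len(CLASSES) - 1
#   dummy = [[np.array([[10., 10., 50., 50., 0.9]]) for _ in range(3)] for _ in range(num_cls)]
#   dummy = seq_nms(dummy)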
| # --------------------------------------------------------
# Flow-Guided Feature Aggregation
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by<NAME>, <NAME>
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import numpy as np
import profile
import cv2
import time
import copy
import cPickle as pickle
import os
import numpy as np
import pysnooper
CLASSES = ('__background__',
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',
'car', 'cattle', 'dog', 'domestic cat', 'elephant', 'fox',
'giant panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey',
'motorcycle', 'rabbit', 'red panda', 'sheep', 'snake', 'squirrel',
'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra')
NMS_THRESH = 0.3
IOU_THRESH = 0.5
MAX_THRESH=1e-2
def createLinks(dets_all):
links_all = []
frame_num = len(dets_all[0])
cls_num = len(CLASSES) - 1
for cls_ind in range(cls_num):
links_cls = []
for frame_ind in range(frame_num - 1):
dets1 = dets_all[cls_ind][frame_ind]
dets2 = dets_all[cls_ind][frame_ind + 1]
box1_num = len(dets1)
box2_num = len(dets2)
if frame_ind == 0:
areas1 = np.empty(box1_num)
for box1_ind, box1 in enumerate(dets1):
areas1[box1_ind] = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
else:
areas1 = areas2
areas2 = np.empty(box2_num)
for box2_ind, box2 in enumerate(dets2):
areas2[box2_ind] = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
links_frame = []
for box1_ind, box1 in enumerate(dets1):
area1 = areas1[box1_ind]
x1 = np.maximum(box1[0], dets2[:, 0])
y1 = np.maximum(box1[1], dets2[:, 1])
x2 = np.minimum(box1[2], dets2[:, 2])
y2 = np.minimum(box1[3], dets2[:, 3])
w = np.maximum(0.0, x2 - x1 + 1)
h = np.maximum(0.0, y2 - y1 + 1)
inter = w * h
ovrs = inter / (area1 + areas2 - inter)
links_box = [ovr_ind for ovr_ind, ovr in enumerate(ovrs) if
ovr >= IOU_THRESH]
links_frame.append(links_box)
links_cls.append(links_frame)
links_all.append(links_cls)
return links_all
def maxPath(dets_all, links_all):
for cls_ind, links_cls in enumerate(links_all):
max_begin = time.time()
delete_sets=[[]for i in range(0,len(dets_all[0]))]
delete_single_box=[]
dets_cls = dets_all[cls_ind]
num_path=0
# compute the number of links
sum_links=0
for frame_ind, frame in enumerate(links_cls):
for box_ind,box in enumerate(frame):
sum_links+=len(box)
while True:
num_path+=1
rootindex, maxpath, maxsum = findMaxPath(links_cls, dets_cls,delete_single_box)
if (maxsum<MAX_THRESH or sum_links==0 or len(maxpath) <1):
break
if (len(maxpath)==1):
delete=[rootindex,maxpath[0]]
delete_single_box.append(delete)
rescore(dets_cls, rootindex, maxpath, maxsum)
t4=time.time()
delete_set,num_delete=deleteLink(dets_cls, links_cls, rootindex, maxpath, NMS_THRESH)
sum_links-=num_delete
for i, box_ind in enumerate(maxpath):
delete_set[i].remove(box_ind)
delete_single_box.append([[rootindex+i],box_ind])
for j in delete_set[i]:
dets_cls[i+rootindex][j]=np.zeros(5)
delete_sets[i+rootindex]=delete_sets[i+rootindex]+delete_set[i]
for frame_idx,frame in enumerate(dets_all[cls_ind]):
a=range(0,len(frame))
keep=list(set(a).difference(set(delete_sets[frame_idx])))
dets_all[cls_ind][frame_idx]=frame[keep,:]
return dets_all
def findMaxPath(links,dets,delete_single_box):
len_dets=[len(dets[i]) for i in xrange(len(dets))]
max_boxes=np.max(len_dets)
num_frame=len(links)+1
a=np.zeros([num_frame,max_boxes])
new_dets=np.zeros([num_frame,max_boxes])
for delete_box in delete_single_box:
new_dets[delete_box[0],delete_box[1]]=1
if(max_boxes==0):
max_path=[]
return 0,max_path,0
b=np.full((num_frame,max_boxes),-1)
for l in xrange(len(dets)):
for j in xrange(len(dets[l])):
if(new_dets[l,j]==0):
a[l,j]=dets[l][j][-1]
for i in xrange(1,num_frame):
l1=i-1;
for box_id,box in enumerate(links[l1]):
for next_box_id in box:
weight_new=a[i-1,box_id]+dets[i][next_box_id][-1]
if(weight_new>a[i,next_box_id]):
a[i,next_box_id]=weight_new
b[i,next_box_id]=box_id
i,j=np.unravel_index(a.argmax(),a.shape)
maxpath=[j]
maxscore=a[i,j]
while(b[i,j]!=-1):
maxpath.append(b[i,j])
j=b[i,j]
i=i-1
rootindex=i
maxpath.reverse()
return rootindex, maxpath, maxscore
def rescore(dets, rootindex, maxpath, maxsum):
newscore = maxsum / len(maxpath)
for i, box_ind in enumerate(maxpath):
dets[rootindex + i][box_ind][4] = newscore
def deleteLink(dets, links, rootindex, maxpath, thesh):
delete_set=[]
num_delete_links=0
for i, box_ind in enumerate(maxpath):
areas = [(box[2] - box[0] + 1) * (box[3] - box[1] + 1) for box in dets[rootindex + i]]
area1 = areas[box_ind]
box1 = dets[rootindex + i][box_ind]
x1 = np.maximum(box1[0], dets[rootindex + i][:, 0])
y1 = np.maximum(box1[1], dets[rootindex + i][:, 1])
x2 = np.minimum(box1[2], dets[rootindex + i][:, 2])
y2 = np.minimum(box1[3], dets[rootindex + i][:, 3])
w = np.maximum(0.0, x2 - x1 + 1)
h = np.maximum(0.0, y2 - y1 + 1)
inter = w * h
ovrs = inter / (area1 + areas - inter)
#saving the box need to delete
deletes = [ovr_ind for ovr_ind, ovr in enumerate(ovrs) if ovr >= 0.3]
delete_set.append(deletes)
#delete the links except for the last frame
if rootindex + i < len(links):
for delete_ind in deletes:
num_delete_links+=len(links[rootindex+i][delete_ind])
links[rootindex + i][delete_ind] = []
if i > 0 or rootindex > 0:
#delete the links which point to box_ind
for priorbox in links[rootindex + i - 1]:
for delete_ind in deletes:
if delete_ind in priorbox:
priorbox.remove(delete_ind)
num_delete_links+=1
return delete_set,num_delete_links
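# Entry point: build the frame-to-frame links, then run the path extraction
# and suppression over all classes.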
def seq_nms(dets):
links = createLinks(dets)
dets=maxPath(dets, links)
return dets
| en | 0.683965 | # -------------------------------------------------------- # Flow-Guided Feature Aggregation # Copyright (c) 2017 Microsoft # Licensed under The MIT License [see LICENSE for details] # Modified by<NAME>, <NAME> # -------------------------------------------------------- # Based on: # MX-RCNN # Copyright (c) 2016 by Contributors # Licence under The Apache 2.0 License # https://github.com/ijkguo/mx-rcnn/ # -------------------------------------------------------- # compute the number of links #saving the box need to delete #delete the links except for the last frame #delete the links which point to box_ind | 2.23139 | 2 |
software/workflows/mde/sub_directories/core_sub_directories.py | Searchlight2/Searchlight2 | 17 | 6632070 | <gh_stars>10-100
import os
from misc.new_directory import new_directory
def core_sub_directories(global_variables, out_path):
new_directory(out_path)
new_directory(os.path.join(out_path, "data"))
new_directory(os.path.join(out_path, "data", "gene_IDs"))
new_directory(os.path.join(out_path, "data", "gene_symbols"))
new_directory(os.path.join(out_path, "data", "statistical_analysis"))
new_directory(os.path.join(out_path, "plots"))
| import os
from misc.new_directory import new_directory
def core_sub_directories(global_variables, out_path):
new_directory(out_path)
new_directory(os.path.join(out_path, "data"))
new_directory(os.path.join(out_path, "data", "gene_IDs"))
new_directory(os.path.join(out_path, "data", "gene_symbols"))
new_directory(os.path.join(out_path, "data", "statistical_analysis"))
new_directory(os.path.join(out_path, "plots")) | none | 1 | 2.364636 | 2 |
|
custom_components/config_flow.py | hombrelab/home-assistant-backup-state | 0 | 6632071 | # Copyright (c) 2021 Hombrelab <<EMAIL>>
# Config flow for the Backup State component.
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant import config_entries, exceptions
from homeassistant.config_entries import ConfigFlow
from .const import (
DOMAIN,
NAME,
TOPIC,
DEFAULT_NAME,
DEFAULT_TOPIC
)
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(DOMAIN)
class BackupStateConfigFlow(ConfigFlow, domain=DOMAIN):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN
async def async_step_user(self, user_input=None):
if user_input is None:
return await self._show_setup_form(user_input)
errors = {}
try:
for entry in self._async_current_entries():
if user_input[NAME] == entry.data[NAME]:
raise ValidationError
except ValidationError:
errors["base"] = "name_error"
return await self._show_setup_form(errors)
try:
await is_valid(user_input)
except ValidationError:
errors["base"] = "variables_error"
return await self._show_setup_form(errors)
data = {
NAME: user_input[NAME],
TOPIC: user_input[TOPIC],
}
return self.async_create_entry(
title=user_input[NAME],
data=data,
)
async def _show_setup_form(self, errors=None):
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(NAME, default=DEFAULT_NAME): str,
vol.Required(TOPIC, default=DEFAULT_TOPIC): str,
}
),
errors=errors or {},
)
async def is_valid(user_input):
if not user_input[NAME].strip():
user_input[NAME] = DEFAULT_NAME
if not user_input[TOPIC].strip():
raise ValidationError
class ValidationError(exceptions.HomeAssistantError):
"""Error to indicate that data is not valid"""
| # Copyright (c) 2021 Hombrelab <<EMAIL>>
# Config flow for the Backup State component.
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant import config_entries, exceptions
from homeassistant.config_entries import ConfigFlow
from .const import (
DOMAIN,
NAME,
TOPIC,
DEFAULT_NAME,
DEFAULT_TOPIC
)
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(DOMAIN)
class BackupStateConfigFlow(ConfigFlow, domain=DOMAIN):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN
async def async_step_user(self, user_input=None):
if user_input is None:
return await self._show_setup_form(user_input)
errors = {}
try:
for entry in self._async_current_entries():
if user_input[NAME] == entry.data[NAME]:
raise ValidationError
except ValidationError:
errors["base"] = "name_error"
return await self._show_setup_form(errors)
try:
await is_valid(user_input)
except ValidationError:
errors["base"] = "variables_error"
return await self._show_setup_form(errors)
data = {
NAME: user_input[NAME],
TOPIC: user_input[TOPIC],
}
return self.async_create_entry(
title=user_input[NAME],
data=data,
)
async def _show_setup_form(self, errors=None):
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(NAME, default=DEFAULT_NAME): str,
vol.Required(TOPIC, default=DEFAULT_TOPIC): str,
}
),
errors=errors or {},
)
async def is_valid(user_input):
if not user_input[NAME].strip():
user_input[NAME] = DEFAULT_NAME
if not user_input[TOPIC].strip():
raise ValidationError
class ValidationError(exceptions.HomeAssistantError):
"""Error to indicate that data is not valid"""
| en | 0.685772 | # Copyright (c) 2021 Hombrelab <<EMAIL>> # Config flow for the Backup State component. Error to indicate that data is not valid | 2.230178 | 2 |
tests/celery_app.py | khamenman/django-logic-celery | 6 | 6632072 | <filename>tests/celery_app.py
from celery import Celery
app = Celery('tests')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
| <filename>tests/celery_app.py
from celery import Celery
app = Celery('tests')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
| none | 1 | 1.512343 | 2 |
|
env/lib/python3.8/site-packages/anymail/backends/postmark.py | avdhari/enigma | 0 | 6632073 | import re
from ..exceptions import AnymailRequestsAPIError
from ..message import AnymailRecipientStatus
from ..utils import get_anymail_setting, parse_address_list
from .base_requests import AnymailRequestsBackend, RequestsPayload
class EmailBackend(AnymailRequestsBackend):
"""
Postmark API Email Backend
"""
esp_name = "Postmark"
def __init__(self, **kwargs):
"""Init options from Django settings"""
esp_name = self.esp_name
self.server_token = get_anymail_setting('server_token', esp_name=esp_name, kwargs=kwargs, allow_bare=True)
api_url = get_anymail_setting('api_url', esp_name=esp_name, kwargs=kwargs,
default="https://api.postmarkapp.com/")
if not api_url.endswith("/"):
api_url += "/"
super(EmailBackend, self).__init__(api_url, **kwargs)
def build_message_payload(self, message, defaults):
return PostmarkPayload(message, defaults, self)
def raise_for_status(self, response, payload, message):
# We need to handle 422 responses in parse_recipient_status
if response.status_code != 422:
super(EmailBackend, self).raise_for_status(response, payload, message)
def parse_recipient_status(self, response, payload, message):
# default to "unknown" status for each recipient, unless/until we find otherwise
unknown_status = AnymailRecipientStatus(message_id=None, status='unknown')
recipient_status = {to.addr_spec: unknown_status for to in payload.to_emails}
parsed_response = self.deserialize_json_response(response, payload, message)
if not isinstance(parsed_response, list):
# non-batch calls return a single response object
parsed_response = [parsed_response]
for one_response in parsed_response:
try:
# these fields should always be present
error_code = one_response["ErrorCode"]
msg = one_response["Message"]
except (KeyError, TypeError):
raise AnymailRequestsAPIError("Invalid Postmark API response format",
email_message=message, payload=payload, response=response,
backend=self)
if error_code == 0:
# At least partial success, and (some) email was sent.
try:
to_header = one_response["To"]
message_id = one_response["MessageID"]
except KeyError:
raise AnymailRequestsAPIError("Invalid Postmark API success response format",
email_message=message, payload=payload,
response=response, backend=self)
for to in parse_address_list(to_header):
recipient_status[to.addr_spec.lower()] = AnymailRecipientStatus(
message_id=message_id, status='sent')
# Sadly, have to parse human-readable message to figure out if everyone got it:
# "Message OK, but will not deliver to these inactive addresses: {addr_spec, ...}.
# Inactive recipients are ones that have generated a hard bounce or a spam complaint."
reject_addr_specs = self._addr_specs_from_error_msg(
msg, r'inactive addresses:\s*(.*)\.\s*Inactive recipients')
for reject_addr_spec in reject_addr_specs:
recipient_status[reject_addr_spec] = AnymailRecipientStatus(
message_id=None, status='rejected')
elif error_code == 300: # Invalid email request
# Either the From address or at least one recipient was invalid. Email not sent.
# response["To"] is not populated for this error; must examine response["Message"]:
# "Invalid 'To' address: '{addr_spec}'."
# "Error parsing 'To': Illegal email domain '{domain}' in address '{addr_spec}'."
# "Error parsing 'To': Illegal email address '{addr_spec}'. It must contain the '@' symbol."
# "Invalid 'From' address: '{email_address}'."
if "'From' address" in msg:
# Normal error
raise AnymailRequestsAPIError(email_message=message, payload=payload, response=response,
backend=self)
else:
# Use AnymailRecipientsRefused logic
invalid_addr_specs = self._addr_specs_from_error_msg(msg, r"address:?\s*'(.*)'")
for invalid_addr_spec in invalid_addr_specs:
recipient_status[invalid_addr_spec] = AnymailRecipientStatus(
message_id=None, status='invalid')
elif error_code == 406: # Inactive recipient
# All recipients were rejected as hard-bounce or spam-complaint. Email not sent.
# response["To"] is not populated for this error; must examine response["Message"]:
# "You tried to send to a recipient that has been marked as inactive.\n
# Found inactive addresses: {addr_spec, ...}.\n
# Inactive recipients are ones that have generated a hard bounce or a spam complaint. "
reject_addr_specs = self._addr_specs_from_error_msg(
msg, r'inactive addresses:\s*(.*)\.\s*Inactive recipients')
for reject_addr_spec in reject_addr_specs:
recipient_status[reject_addr_spec] = AnymailRecipientStatus(
message_id=None, status='rejected')
else: # Other error
raise AnymailRequestsAPIError(email_message=message, payload=payload, response=response,
backend=self)
return recipient_status
@staticmethod
def _addr_specs_from_error_msg(error_msg, pattern):
"""Extract a list of email addr_specs from Postmark error_msg.
pattern must be a re whose first group matches a comma-separated
list of addr_specs in the message
"""
match = re.search(pattern, error_msg, re.MULTILINE)
if match:
emails = match.group(1) # "<EMAIL>, <EMAIL>"
return [email.strip().lower() for email in emails.split(',')]
else:
return []
class PostmarkPayload(RequestsPayload):
def __init__(self, message, defaults, backend, *args, **kwargs):
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
# 'X-Postmark-Server-Token': see get_request_params (and set_esp_extra)
}
self.server_token = backend.server_token # added to headers later, so esp_extra can override
self.to_emails = []
self.merge_data = None
super(PostmarkPayload, self).__init__(message, defaults, backend, headers=headers, *args, **kwargs)
def get_api_endpoint(self):
batch_send = self.merge_data is not None and len(self.to_emails) > 1
if 'TemplateAlias' in self.data or 'TemplateId' in self.data or 'TemplateModel' in self.data:
if batch_send:
return "email/batchWithTemplates"
else:
                # This is the one Postmark API endpoint documented to have a trailing slash. (Typo?)
return "email/withTemplate/"
else:
if batch_send:
return "email/batch"
else:
return "email"
def get_request_params(self, api_url):
params = super(PostmarkPayload, self).get_request_params(api_url)
params['headers']['X-Postmark-Server-Token'] = self.server_token
return params
def serialize_data(self):
data = self.data
api_endpoint = self.get_api_endpoint()
if api_endpoint == "email/batchWithTemplates":
data = {"Messages": [self.data_for_recipient(to) for to in self.to_emails]}
elif api_endpoint == "email/batch":
data = [self.data_for_recipient(to) for to in self.to_emails]
return self.serialize_json(data)
def data_for_recipient(self, to):
data = self.data.copy()
data["To"] = to.address
if self.merge_data and to.addr_spec in self.merge_data:
recipient_data = self.merge_data[to.addr_spec]
if "TemplateModel" in data:
# merge recipient_data into merge_global_data
data["TemplateModel"] = data["TemplateModel"].copy()
data["TemplateModel"].update(recipient_data)
else:
data["TemplateModel"] = recipient_data
return data
#
# Payload construction
#
def init_payload(self):
self.data = {} # becomes json
def set_from_email_list(self, emails):
# Postmark accepts multiple From email addresses
# (though truncates to just the first, on their end, as of 4/2017)
self.data["From"] = ", ".join([email.address for email in emails])
def set_recipients(self, recipient_type, emails):
assert recipient_type in ["to", "cc", "bcc"]
if emails:
field = recipient_type.capitalize()
self.data[field] = ', '.join([email.address for email in emails])
if recipient_type == "to":
self.to_emails = emails
def set_subject(self, subject):
self.data["Subject"] = subject
def set_reply_to(self, emails):
if emails:
reply_to = ", ".join([email.address for email in emails])
self.data["ReplyTo"] = reply_to
def set_extra_headers(self, headers):
self.data["Headers"] = [
{"Name": key, "Value": value}
for key, value in headers.items()
]
def set_text_body(self, body):
self.data["TextBody"] = body
def set_html_body(self, body):
if "HtmlBody" in self.data:
# second html body could show up through multiple alternatives, or html body + alternative
self.unsupported_feature("multiple html parts")
self.data["HtmlBody"] = body
def make_attachment(self, attachment):
"""Returns Postmark attachment dict for attachment"""
att = {
"Name": attachment.name or "",
"Content": attachment.b64content,
"ContentType": attachment.mimetype,
}
if attachment.inline:
att["ContentID"] = "cid:%s" % attachment.cid
return att
def set_attachments(self, attachments):
if attachments:
self.data["Attachments"] = [
self.make_attachment(attachment) for attachment in attachments
]
def set_metadata(self, metadata):
self.data["Metadata"] = metadata
# Postmark doesn't support delayed sending
# def set_send_at(self, send_at):
def set_tags(self, tags):
if len(tags) > 0:
self.data["Tag"] = tags[0]
if len(tags) > 1:
self.unsupported_feature('multiple tags (%r)' % tags)
def set_track_clicks(self, track_clicks):
self.data["TrackLinks"] = 'HtmlAndText' if track_clicks else 'None'
def set_track_opens(self, track_opens):
self.data["TrackOpens"] = track_opens
def set_template_id(self, template_id):
try:
self.data["TemplateId"] = int(template_id)
except ValueError:
self.data["TemplateAlias"] = template_id
# Subject, TextBody, and HtmlBody aren't allowed with TemplateId;
# delete Django default subject and body empty strings:
for field in ("Subject", "TextBody", "HtmlBody"):
if field in self.data and not self.data[field]:
del self.data[field]
def set_merge_data(self, merge_data):
# late-bind
self.merge_data = merge_data
def set_merge_global_data(self, merge_global_data):
self.data["TemplateModel"] = merge_global_data
def set_esp_extra(self, extra):
self.data.update(extra)
# Special handling for 'server_token':
self.server_token = self.data.pop('server_token', self.server_token)
| import re
from ..exceptions import AnymailRequestsAPIError
from ..message import AnymailRecipientStatus
from ..utils import get_anymail_setting, parse_address_list
from .base_requests import AnymailRequestsBackend, RequestsPayload
class EmailBackend(AnymailRequestsBackend):
"""
Postmark API Email Backend
"""
esp_name = "Postmark"
def __init__(self, **kwargs):
"""Init options from Django settings"""
esp_name = self.esp_name
self.server_token = get_anymail_setting('server_token', esp_name=esp_name, kwargs=kwargs, allow_bare=True)
api_url = get_anymail_setting('api_url', esp_name=esp_name, kwargs=kwargs,
default="https://api.postmarkapp.com/")
if not api_url.endswith("/"):
api_url += "/"
super(EmailBackend, self).__init__(api_url, **kwargs)
def build_message_payload(self, message, defaults):
return PostmarkPayload(message, defaults, self)
def raise_for_status(self, response, payload, message):
# We need to handle 422 responses in parse_recipient_status
if response.status_code != 422:
super(EmailBackend, self).raise_for_status(response, payload, message)
def parse_recipient_status(self, response, payload, message):
# default to "unknown" status for each recipient, unless/until we find otherwise
unknown_status = AnymailRecipientStatus(message_id=None, status='unknown')
recipient_status = {to.addr_spec: unknown_status for to in payload.to_emails}
parsed_response = self.deserialize_json_response(response, payload, message)
if not isinstance(parsed_response, list):
# non-batch calls return a single response object
parsed_response = [parsed_response]
for one_response in parsed_response:
try:
# these fields should always be present
error_code = one_response["ErrorCode"]
msg = one_response["Message"]
except (KeyError, TypeError):
raise AnymailRequestsAPIError("Invalid Postmark API response format",
email_message=message, payload=payload, response=response,
backend=self)
if error_code == 0:
# At least partial success, and (some) email was sent.
try:
to_header = one_response["To"]
message_id = one_response["MessageID"]
except KeyError:
raise AnymailRequestsAPIError("Invalid Postmark API success response format",
email_message=message, payload=payload,
response=response, backend=self)
for to in parse_address_list(to_header):
recipient_status[to.addr_spec.lower()] = AnymailRecipientStatus(
message_id=message_id, status='sent')
# Sadly, have to parse human-readable message to figure out if everyone got it:
# "Message OK, but will not deliver to these inactive addresses: {addr_spec, ...}.
# Inactive recipients are ones that have generated a hard bounce or a spam complaint."
reject_addr_specs = self._addr_specs_from_error_msg(
msg, r'inactive addresses:\s*(.*)\.\s*Inactive recipients')
for reject_addr_spec in reject_addr_specs:
recipient_status[reject_addr_spec] = AnymailRecipientStatus(
message_id=None, status='rejected')
elif error_code == 300: # Invalid email request
# Either the From address or at least one recipient was invalid. Email not sent.
# response["To"] is not populated for this error; must examine response["Message"]:
# "Invalid 'To' address: '{addr_spec}'."
# "Error parsing 'To': Illegal email domain '{domain}' in address '{addr_spec}'."
# "Error parsing 'To': Illegal email address '{addr_spec}'. It must contain the '@' symbol."
# "Invalid 'From' address: '{email_address}'."
if "'From' address" in msg:
# Normal error
raise AnymailRequestsAPIError(email_message=message, payload=payload, response=response,
backend=self)
else:
# Use AnymailRecipientsRefused logic
invalid_addr_specs = self._addr_specs_from_error_msg(msg, r"address:?\s*'(.*)'")
for invalid_addr_spec in invalid_addr_specs:
recipient_status[invalid_addr_spec] = AnymailRecipientStatus(
message_id=None, status='invalid')
elif error_code == 406: # Inactive recipient
# All recipients were rejected as hard-bounce or spam-complaint. Email not sent.
# response["To"] is not populated for this error; must examine response["Message"]:
# "You tried to send to a recipient that has been marked as inactive.\n
# Found inactive addresses: {addr_spec, ...}.\n
# Inactive recipients are ones that have generated a hard bounce or a spam complaint. "
reject_addr_specs = self._addr_specs_from_error_msg(
msg, r'inactive addresses:\s*(.*)\.\s*Inactive recipients')
for reject_addr_spec in reject_addr_specs:
recipient_status[reject_addr_spec] = AnymailRecipientStatus(
message_id=None, status='rejected')
else: # Other error
raise AnymailRequestsAPIError(email_message=message, payload=payload, response=response,
backend=self)
return recipient_status
@staticmethod
def _addr_specs_from_error_msg(error_msg, pattern):
"""Extract a list of email addr_specs from Postmark error_msg.
pattern must be a re whose first group matches a comma-separated
list of addr_specs in the message
"""
match = re.search(pattern, error_msg, re.MULTILINE)
if match:
emails = match.group(1) # "<EMAIL>, <EMAIL>"
return [email.strip().lower() for email in emails.split(',')]
else:
return []
class PostmarkPayload(RequestsPayload):
def __init__(self, message, defaults, backend, *args, **kwargs):
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
# 'X-Postmark-Server-Token': see get_request_params (and set_esp_extra)
}
self.server_token = backend.server_token # added to headers later, so esp_extra can override
self.to_emails = []
self.merge_data = None
super(PostmarkPayload, self).__init__(message, defaults, backend, headers=headers, *args, **kwargs)
def get_api_endpoint(self):
batch_send = self.merge_data is not None and len(self.to_emails) > 1
if 'TemplateAlias' in self.data or 'TemplateId' in self.data or 'TemplateModel' in self.data:
if batch_send:
return "email/batchWithTemplates"
else:
                # This is the one Postmark API endpoint documented to have a trailing slash. (Typo?)
return "email/withTemplate/"
else:
if batch_send:
return "email/batch"
else:
return "email"
def get_request_params(self, api_url):
params = super(PostmarkPayload, self).get_request_params(api_url)
params['headers']['X-Postmark-Server-Token'] = self.server_token
return params
def serialize_data(self):
data = self.data
api_endpoint = self.get_api_endpoint()
if api_endpoint == "email/batchWithTemplates":
data = {"Messages": [self.data_for_recipient(to) for to in self.to_emails]}
elif api_endpoint == "email/batch":
data = [self.data_for_recipient(to) for to in self.to_emails]
return self.serialize_json(data)
def data_for_recipient(self, to):
data = self.data.copy()
data["To"] = to.address
if self.merge_data and to.addr_spec in self.merge_data:
recipient_data = self.merge_data[to.addr_spec]
if "TemplateModel" in data:
# merge recipient_data into merge_global_data
data["TemplateModel"] = data["TemplateModel"].copy()
data["TemplateModel"].update(recipient_data)
else:
data["TemplateModel"] = recipient_data
return data
#
# Payload construction
#
def init_payload(self):
self.data = {} # becomes json
def set_from_email_list(self, emails):
# Postmark accepts multiple From email addresses
# (though truncates to just the first, on their end, as of 4/2017)
self.data["From"] = ", ".join([email.address for email in emails])
def set_recipients(self, recipient_type, emails):
assert recipient_type in ["to", "cc", "bcc"]
if emails:
field = recipient_type.capitalize()
self.data[field] = ', '.join([email.address for email in emails])
if recipient_type == "to":
self.to_emails = emails
def set_subject(self, subject):
self.data["Subject"] = subject
def set_reply_to(self, emails):
if emails:
reply_to = ", ".join([email.address for email in emails])
self.data["ReplyTo"] = reply_to
def set_extra_headers(self, headers):
self.data["Headers"] = [
{"Name": key, "Value": value}
for key, value in headers.items()
]
def set_text_body(self, body):
self.data["TextBody"] = body
def set_html_body(self, body):
if "HtmlBody" in self.data:
# second html body could show up through multiple alternatives, or html body + alternative
self.unsupported_feature("multiple html parts")
self.data["HtmlBody"] = body
def make_attachment(self, attachment):
"""Returns Postmark attachment dict for attachment"""
att = {
"Name": attachment.name or "",
"Content": attachment.b64content,
"ContentType": attachment.mimetype,
}
if attachment.inline:
att["ContentID"] = "cid:%s" % attachment.cid
return att
def set_attachments(self, attachments):
if attachments:
self.data["Attachments"] = [
self.make_attachment(attachment) for attachment in attachments
]
def set_metadata(self, metadata):
self.data["Metadata"] = metadata
# Postmark doesn't support delayed sending
# def set_send_at(self, send_at):
def set_tags(self, tags):
if len(tags) > 0:
self.data["Tag"] = tags[0]
if len(tags) > 1:
self.unsupported_feature('multiple tags (%r)' % tags)
def set_track_clicks(self, track_clicks):
self.data["TrackLinks"] = 'HtmlAndText' if track_clicks else 'None'
def set_track_opens(self, track_opens):
self.data["TrackOpens"] = track_opens
def set_template_id(self, template_id):
try:
self.data["TemplateId"] = int(template_id)
except ValueError:
self.data["TemplateAlias"] = template_id
# Subject, TextBody, and HtmlBody aren't allowed with TemplateId;
# delete Django default subject and body empty strings:
for field in ("Subject", "TextBody", "HtmlBody"):
if field in self.data and not self.data[field]:
del self.data[field]
def set_merge_data(self, merge_data):
# late-bind
self.merge_data = merge_data
def set_merge_global_data(self, merge_global_data):
self.data["TemplateModel"] = merge_global_data
def set_esp_extra(self, extra):
self.data.update(extra)
# Special handling for 'server_token':
self.server_token = self.data.pop('server_token', self.server_token)
| en | 0.869812 | Postmark API Email Backend Init options from Django settings # We need to handle 422 responses in parse_recipient_status # default to "unknown" status for each recipient, unless/until we find otherwise # non-batch calls return a single response object # these fields should always be present # At least partial success, and (some) email was sent. # Sadly, have to parse human-readable message to figure out if everyone got it: # "Message OK, but will not deliver to these inactive addresses: {addr_spec, ...}. # Inactive recipients are ones that have generated a hard bounce or a spam complaint." # Invalid email request # Either the From address or at least one recipient was invalid. Email not sent. # response["To"] is not populated for this error; must examine response["Message"]: # "Invalid 'To' address: '{addr_spec}'." # "Error parsing 'To': Illegal email domain '{domain}' in address '{addr_spec}'." # "Error parsing 'To': Illegal email address '{addr_spec}'. It must contain the '@' symbol." # "Invalid 'From' address: '{email_address}'." # Normal error # Use AnymailRecipientsRefused logic # Inactive recipient # All recipients were rejected as hard-bounce or spam-complaint. Email not sent. # response["To"] is not populated for this error; must examine response["Message"]: # "You tried to send to a recipient that has been marked as inactive.\n # Found inactive addresses: {addr_spec, ...}.\n # Inactive recipients are ones that have generated a hard bounce or a spam complaint. " # Other error Extract a list of email addr_specs from Postmark error_msg. pattern must be a re whose first group matches a comma-separated list of addr_specs in the message # "<EMAIL>, <EMAIL>" # 'X-Postmark-Server-Token': see get_request_params (and set_esp_extra) # added to headers later, so esp_extra can override # This is the one Postmark API documented to have a trailing slash. (Typo?) # merge recipient_data into merge_global_data # # Payload construction # # becomes json # Postmark accepts multiple From email addresses # (though truncates to just the first, on their end, as of 4/2017) # second html body could show up through multiple alternatives, or html body + alternative Returns Postmark attachment dict for attachment # Postmark doesn't support delayed sending # def set_send_at(self, send_at): # Subject, TextBody, and HtmlBody aren't allowed with TemplateId; # delete Django default subject and body empty strings: # late-bind # Special handling for 'server_token': | 2.286587 | 2 |
gitlab2prov/pipelines.py | DLR-SC/gitlab2prov | 13 | 6632074 | from typing import List, Tuple
from prov.model import ProvDocument
from gitlab2prov.api import GitlabClient
from gitlab2prov.models import create_graph
from gitlab2prov.procs import (CommitProcessor, CommitResourceProcessor,
IssueResourceProcessor, MergeRequestResourceProcessor, ReleaseTagProcessor)
from gitlab2prov.procs.meta import CommitModelPackage, ResourceModelPackage
from gitlab2prov.utils.types import (Award, Commit, Diff, Issue, Label, MergeRequest,
Note)
class CommitPipeline:
"""
Pipeline that fetches, processes and models git commits of a project.
"""
@staticmethod
async def fetch(client: GitlabClient) -> Tuple[List[Commit], List[Diff]]:
"""
Retrieve commits and their diffs from the project API wrapper.
"""
async with client as clt:
commits = await clt.commits()
diffs = await clt.commit_diffs()
return commits, diffs
@staticmethod
def process(commits: List[Commit], diffs: List[Diff]) -> List[CommitModelPackage]:
"""
Return list of commit model packages.
"""
packages = CommitProcessor.process(commits, diffs)
return packages
@staticmethod
def create_model(packages: List[CommitModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class CommitResourcePipeline:
"""
Pipeline that fetches, processes and models project commits.
"""
@staticmethod
async def fetch(client) -> Tuple[List[Commit], List[List[Note]]]:
"""
        Retrieve commits and their notes from the project API wrapper.
"""
async with client as clt:
commits = await clt.commits()
notes = await clt.commit_notes()
return commits, notes
@staticmethod
def process(commits: List[Commit], notes: List[List[Note]]) -> List[ResourceModelPackage]:
"""
Return list of resource model packages.
"""
packages = CommitResourceProcessor.process(commits, notes)
return packages
@staticmethod
def create_model(packages: List[ResourceModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class IssueResourcePipeline:
"""
Pipeline that fetches, processes and models project issues.
"""
@staticmethod
async def fetch(client) -> Tuple[List[Issue],
List[List[Note]],
List[List[Label]],
List[List[Award]],
List[List[Award]]]:
"""Retrieve issues, their labels, their awards, their notes and
the awards of all notes from the project API wrapper."""
async with client as clt:
issues = await clt.issues()
labels = await clt.issue_labels()
awards = await clt.issue_awards()
notes = await clt.issue_notes()
note_awards = await clt.issue_note_awards()
return issues, notes, labels, awards, note_awards
@staticmethod
def process(issues: List[Issue],
notes: List[List[Note]],
labels: List[List[Label]],
awards: List[List[Award]],
note_awards: List[List[Award]]) -> List[ResourceModelPackage]:
"""
Return list of resource model packages.
"""
packages = IssueResourceProcessor.process(issues, notes, labels, awards, note_awards)
return packages
@staticmethod
def create_model(packages: List[ResourceModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class MergeRequestResourcePipeline:
"""
Pipeline that fetches, processes and models project merge requests.
"""
@staticmethod
async def fetch(client) -> Tuple[List[MergeRequest],
List[List[Note]],
List[List[Label]],
List[List[Award]],
List[List[Award]]]:
"""
Retrieve merge requests, their labels, their awards, their
notes and all awards for each note from the project API wrapper.
"""
async with client as clt:
merge_requests = await clt.merge_requests()
labels = await clt.merge_request_labels()
awards = await clt.merge_request_awards()
notes = await clt.merge_request_notes()
note_awards = await clt.merge_request_note_awards()
return merge_requests, notes, labels, awards, note_awards
@staticmethod
def process(merge_requests: List[MergeRequest],
notes: List[List[Note]],
labels: List[List[Label]],
awards: List[List[Award]],
note_awards: List[List[Award]]) -> List[ResourceModelPackage]:
"""
Return list of resource model packages.
"""
packages = MergeRequestResourceProcessor.process(merge_requests, notes, labels, awards, note_awards)
return packages
@staticmethod
def create_model(packages: List[ResourceModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class ReleaseTagPipeline:
@staticmethod
async def fetch(client):
async with client as clt:
releases = await clt.releases()
tags = await clt.tags()
return releases, tags
@staticmethod
def process(releases, tags):
packages = ReleaseTagProcessor.process(releases, tags)
return packages
@staticmethod
def create_model(packages):
model = create_graph(packages)
return model
| from typing import List, Tuple
from prov.model import ProvDocument
from gitlab2prov.api import GitlabClient
from gitlab2prov.models import create_graph
from gitlab2prov.procs import (CommitProcessor, CommitResourceProcessor,
IssueResourceProcessor, MergeRequestResourceProcessor, ReleaseTagProcessor)
from gitlab2prov.procs.meta import CommitModelPackage, ResourceModelPackage
from gitlab2prov.utils.types import (Award, Commit, Diff, Issue, Label, MergeRequest,
Note)
class CommitPipeline:
"""
Pipeline that fetches, processes and models git commits of a project.
"""
@staticmethod
async def fetch(client: GitlabClient) -> Tuple[List[Commit], List[Diff]]:
"""
Retrieve commits and their diffs from the project API wrapper.
"""
async with client as clt:
commits = await clt.commits()
diffs = await clt.commit_diffs()
return commits, diffs
@staticmethod
def process(commits: List[Commit], diffs: List[Diff]) -> List[CommitModelPackage]:
"""
Return list of commit model packages.
"""
packages = CommitProcessor.process(commits, diffs)
return packages
@staticmethod
def create_model(packages: List[CommitModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class CommitResourcePipeline:
"""
Pipeline that fetches, processes and models project commits.
"""
@staticmethod
async def fetch(client) -> Tuple[List[Commit], List[List[Note]]]:
"""
        Retrieve commits and their notes from the project API wrapper.
"""
async with client as clt:
commits = await clt.commits()
notes = await clt.commit_notes()
return commits, notes
@staticmethod
def process(commits: List[Commit], notes: List[List[Note]]) -> List[ResourceModelPackage]:
"""
Return list of resource model packages.
"""
packages = CommitResourceProcessor.process(commits, notes)
return packages
@staticmethod
def create_model(packages: List[ResourceModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class IssueResourcePipeline:
"""
Pipeline that fetches, processes and models project issues.
"""
@staticmethod
async def fetch(client) -> Tuple[List[Issue],
List[List[Note]],
List[List[Label]],
List[List[Award]],
List[List[Award]]]:
"""Retrieve issues, their labels, their awards, their notes and
the awards of all notes from the project API wrapper."""
async with client as clt:
issues = await clt.issues()
labels = await clt.issue_labels()
awards = await clt.issue_awards()
notes = await clt.issue_notes()
note_awards = await clt.issue_note_awards()
return issues, notes, labels, awards, note_awards
@staticmethod
def process(issues: List[Issue],
notes: List[List[Note]],
labels: List[List[Label]],
awards: List[List[Award]],
note_awards: List[List[Award]]) -> List[ResourceModelPackage]:
"""
Return list of resource model packages.
"""
packages = IssueResourceProcessor.process(issues, notes, labels, awards, note_awards)
return packages
@staticmethod
def create_model(packages: List[ResourceModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class MergeRequestResourcePipeline:
"""
Pipeline that fetches, processes and models project merge requests.
"""
@staticmethod
async def fetch(client) -> Tuple[List[MergeRequest],
List[List[Note]],
List[List[Label]],
List[List[Award]],
List[List[Award]]]:
"""
Retrieve merge requests, their labels, their awards, their
notes and all awards for each note from the project API wrapper.
"""
async with client as clt:
merge_requests = await clt.merge_requests()
labels = await clt.merge_request_labels()
awards = await clt.merge_request_awards()
notes = await clt.merge_request_notes()
note_awards = await clt.merge_request_note_awards()
return merge_requests, notes, labels, awards, note_awards
@staticmethod
def process(merge_requests: List[MergeRequest],
notes: List[List[Note]],
labels: List[List[Label]],
awards: List[List[Award]],
note_awards: List[List[Award]]) -> List[ResourceModelPackage]:
"""
Return list of resource model packages.
"""
packages = MergeRequestResourceProcessor.process(merge_requests, notes, labels, awards, note_awards)
return packages
@staticmethod
def create_model(packages: List[ResourceModelPackage]) -> ProvDocument:
"""
Return populated PROV graph for resource model.
"""
model = create_graph(packages)
return model
class ReleaseTagPipeline:
@staticmethod
async def fetch(client):
async with client as clt:
releases = await clt.releases()
tags = await clt.tags()
return releases, tags
@staticmethod
def process(releases, tags):
packages = ReleaseTagProcessor.process(releases, tags)
return packages
@staticmethod
def create_model(packages):
model = create_graph(packages)
return model
| en | 0.916788 | Pipeline that fetches, processes and models git commits of a project. Retrieve commits and their diffs from the project API wrapper. Return list of commit model packages. Return populated PROV graph for resource model. Pipeline that fetches, processes and models project commits. Retrieve commits and their notes from the project API wrapped. Return list of resource model packages. Return populated PROV graph for resource model. Pipeline that fetches, processes and models project issues. Retrieve issues, their labels, their awards, their notes and the awards of all notes from the project API wrapper. Return list of resource model packages. Return populated PROV graph for resource model. Pipeline that fetches, processes and models project merge requests. Retrieve merge requests, their labels, their awards, their notes and all awards for each note from the project API wrapper. Return list of resource model packages. Return populated PROV graph for resource model. | 2.46897 | 2 |
setup.py | taintedkernel/powerline-vcsh | 0 | 6632075 | <filename>setup.py
from setuptools import setup
setup(
name = 'powerline-vcsh',
description = 'A Powerline segment for showing the current VCSH repo',
version = '1.0.0',
keywords = 'powerline vcsh prompt',
license = 'MIT',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/taintedkernel/powerline-vcsh',
packages = ['powerline_vcsh'],
install_requires = ['powerline-status'],
classifiers = [
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Terminals'
]
)
| <filename>setup.py
from setuptools import setup
setup(
name = 'powerline-vcsh',
description = 'A Powerline segment for showing the current VCSH repo',
version = '1.0.0',
keywords = 'powerline vcsh prompt',
license = 'MIT',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/taintedkernel/powerline-vcsh',
packages = ['powerline_vcsh'],
install_requires = ['powerline-status'],
classifiers = [
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Terminals'
]
)
| none | 1 | 1.303054 | 1 |
|
CryostatGUI/util/broker_app_clients.py | Cryostat-GUI/Cryostat-GUI | 2 | 6632076 | <filename>CryostatGUI/util/broker_app_clients.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# This broker subscribes to all applications.
# If an application sends a signal, the Broker will publish the signal to all ControlClients subscribed to this broker.
import zmq
def main():
context = zmq.Context()
# Socket facing producers
frontend = context.socket(zmq.XPUB)
frontend.bind("tcp://127.0.0.1:5561")
# Socket facing consumers
backend = context.socket(zmq.XSUB)
backend.bind("tcp://127.0.0.1:5562")
zmq.proxy(frontend, backend)
# We never get here…
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| <filename>CryostatGUI/util/broker_app_clients.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# This broker subscribes to all applications.
# If an application sends a signal, the Broker will publish the signal to all ControlClients subscribed to this broker.
import zmq
def main():
context = zmq.Context()
# Socket facing producers
frontend = context.socket(zmq.XPUB)
frontend.bind("tcp://127.0.0.1:5561")
# Socket facing consumers
backend = context.socket(zmq.XSUB)
backend.bind("tcp://127.0.0.1:5562")
zmq.proxy(frontend, backend)
# We never get here…
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| en | 0.769738 | #!/usr/bin/python # -*- coding: UTF-8 -*- # This broker subscribes to all applications. # If an application sends a signal, the Broker will publish the signal to all ControlClients subscribed to this broker. # Socket facing producers # Socket facing consumers # We never get here… | 2.271631 | 2 |
ginga/qtw/plugins/WBrowser.py | astrofrog/ginga | 1 | 6632077 | <filename>ginga/qtw/plugins/WBrowser.py<gh_stars>1-10
#
# WBrowser.py -- Web Browser plugin for fits viewer
#
# <NAME> (<EMAIL>)
#
# Copyright (c) <NAME>. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys, os
from ginga import GingaPlugin
from ginga.qtw.QtHelp import QtGui, QtCore
has_webkit = False
try:
from ginga.qtw.QtHelp import QtWebKit as webkit
has_webkit = True
except ImportError:
pass
moduleHome = os.path.split(sys.modules[__name__].__file__)[0]
class WBrowser(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(WBrowser, self).__init__(fv)
self.browser = None
def build_gui(self, container):
rvbox = container
if not has_webkit:
self.browser = QtGui.QLabel("Please install the python-webkit package to enable this plugin")
else:
self.browser = webkit.QWebView()
sw = QtGui.QScrollArea()
sw.setWidgetResizable(True)
#sw.set_border_width(2)
sw.setWidget(self.browser)
rvbox.addWidget(sw, stretch=1)
sw.show()
self.entry = QtGui.QLineEdit()
rvbox.addWidget(self.entry, stretch=0)
self.entry.returnPressed.connect(self.browse_cb)
if has_webkit:
helpfile = os.path.abspath(os.path.join(moduleHome, "..",
"..", "doc", "manual",
"quickref.html"))
helpurl = "file://%s" % (helpfile)
self.entry.setText(helpurl)
self.browse(helpurl)
def browse(self, url):
self.logger.debug("Browsing '%s'" % (url))
self.browser.load(QtCore.QUrl(url))
self.browser.show()
def browse_cb(self):
url = str(self.entry.text()).strip()
self.browse(url)
def __str__(self):
return 'wbrowser'
#END
| <filename>ginga/qtw/plugins/WBrowser.py<gh_stars>1-10
#
# WBrowser.py -- Web Browser plugin for fits viewer
#
# <NAME> (<EMAIL>)
#
# Copyright (c) <NAME>. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys, os
from ginga import GingaPlugin
from ginga.qtw.QtHelp import QtGui, QtCore
has_webkit = False
try:
from ginga.qtw.QtHelp import QtWebKit as webkit
has_webkit = True
except ImportError:
pass
moduleHome = os.path.split(sys.modules[__name__].__file__)[0]
class WBrowser(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(WBrowser, self).__init__(fv)
self.browser = None
def build_gui(self, container):
rvbox = container
if not has_webkit:
self.browser = QtGui.QLabel("Please install the python-webkit package to enable this plugin")
else:
self.browser = webkit.QWebView()
sw = QtGui.QScrollArea()
sw.setWidgetResizable(True)
#sw.set_border_width(2)
sw.setWidget(self.browser)
rvbox.addWidget(sw, stretch=1)
sw.show()
self.entry = QtGui.QLineEdit()
rvbox.addWidget(self.entry, stretch=0)
self.entry.returnPressed.connect(self.browse_cb)
if has_webkit:
helpfile = os.path.abspath(os.path.join(moduleHome, "..",
"..", "doc", "manual",
"quickref.html"))
helpurl = "file://%s" % (helpfile)
self.entry.setText(helpurl)
self.browse(helpurl)
def browse(self, url):
self.logger.debug("Browsing '%s'" % (url))
self.browser.load(QtCore.QUrl(url))
self.browser.show()
def browse_cb(self):
url = str(self.entry.text()).strip()
self.browse(url)
def __str__(self):
return 'wbrowser'
#END
| en | 0.746536 | # # WBrowser.py -- Web Browser plugin for fits viewer # # <NAME> (<EMAIL>) # # Copyright (c) <NAME>. All rights reserved. # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. # # superclass defines some variables for us, like logger #sw.set_border_width(2) #END | 2.116008 | 2 |
interact/agents/__init__.py | rystrauss/interact | 1 | 6632078 | from .a2c import A2CAgent
from .ddpg import DDPGAgent, TD3Agent
from .dqn import DQNAgent
from .ppg import PPGAgent
from .ppo import PPOAgent
from .sac import SACAgent
| from .a2c import A2CAgent
from .ddpg import DDPGAgent, TD3Agent
from .dqn import DQNAgent
from .ppg import PPGAgent
from .ppo import PPOAgent
from .sac import SACAgent
| none | 1 | 0.989007 | 1 |
|
SimModel_Python_API/simmodel_swig/Release/SimConnection_HotWaterFlow_Default.py | EnEff-BIM/EnEffBIM-Framework | 3 | 6632079 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimConnection_HotWaterFlow_Default', [dirname(__file__)])
except ImportError:
import _SimConnection_HotWaterFlow_Default
return _SimConnection_HotWaterFlow_Default
if fp is not None:
try:
_mod = imp.load_module('_SimConnection_HotWaterFlow_Default', fp, pathname, description)
finally:
fp.close()
return _mod
_SimConnection_HotWaterFlow_Default = swig_import_helper()
del swig_import_helper
else:
import _SimConnection_HotWaterFlow_Default
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
class SimConnection(base.SimResourceObject):
__swig_setmethods__ = {}
for _s in [base.SimResourceObject]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection, name, value)
__swig_getmethods__ = {}
for _s in [base.SimResourceObject]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection, name)
__repr__ = _swig_repr
def RelatingPort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_RelatingPort(self, *args)
def RelatedPort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_RelatedPort(self, *args)
def RealizingElement(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_RealizingElement(self, *args)
def SourcePort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_SourcePort(self, *args)
def TargetPort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_TargetPort(self, *args)
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimConnection_HotWaterFlow_Default.SimConnection__clone(self, f, c)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection
__del__ = lambda self: None
SimConnection_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_swigregister
SimConnection_swigregister(SimConnection)
class SimConnection_HotWaterFlow(SimConnection):
__swig_setmethods__ = {}
for _s in [SimConnection]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection_HotWaterFlow, name, value)
__swig_getmethods__ = {}
for _s in [SimConnection]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection_HotWaterFlow, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection_HotWaterFlow(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow__clone(self, f, c)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection_HotWaterFlow
__del__ = lambda self: None
SimConnection_HotWaterFlow_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_swigregister
SimConnection_HotWaterFlow_swigregister(SimConnection_HotWaterFlow)
class SimConnection_HotWaterFlow_Default(SimConnection_HotWaterFlow):
__swig_setmethods__ = {}
for _s in [SimConnection_HotWaterFlow]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection_HotWaterFlow_Default, name, value)
__swig_getmethods__ = {}
for _s in [SimConnection_HotWaterFlow]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection_HotWaterFlow_Default, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection_HotWaterFlow_Default(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default__clone(self, f, c)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection_HotWaterFlow_Default
__del__ = lambda self: None
SimConnection_HotWaterFlow_Default_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_swigregister
SimConnection_HotWaterFlow_Default_swigregister(SimConnection_HotWaterFlow_Default)
class SimConnection_HotWaterFlow_Default_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection_HotWaterFlow_Default_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection_HotWaterFlow_Default_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection_HotWaterFlow_Default_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_assign(self, n, x)
def begin(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_begin(self, *args)
def end(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_end(self, *args)
def rbegin(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_rend(self, *args)
def at(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_at(self, *args)
def front(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_front(self, *args)
def back(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_back(self, *args)
def push_back(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_push_back(self, *args)
def pop_back(self):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_insert(self, *args)
def erase(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_swap(self, x)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection_HotWaterFlow_Default_sequence
__del__ = lambda self: None
SimConnection_HotWaterFlow_Default_sequence_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_swigregister
SimConnection_HotWaterFlow_Default_sequence_swigregister(SimConnection_HotWaterFlow_Default_sequence)
# This file is compatible with both classic and new-style classes.
| # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimConnection_HotWaterFlow_Default', [dirname(__file__)])
except ImportError:
import _SimConnection_HotWaterFlow_Default
return _SimConnection_HotWaterFlow_Default
if fp is not None:
try:
_mod = imp.load_module('_SimConnection_HotWaterFlow_Default', fp, pathname, description)
finally:
fp.close()
return _mod
_SimConnection_HotWaterFlow_Default = swig_import_helper()
del swig_import_helper
else:
import _SimConnection_HotWaterFlow_Default
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
class SimConnection(base.SimResourceObject):
__swig_setmethods__ = {}
for _s in [base.SimResourceObject]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection, name, value)
__swig_getmethods__ = {}
for _s in [base.SimResourceObject]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection, name)
__repr__ = _swig_repr
def RelatingPort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_RelatingPort(self, *args)
def RelatedPort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_RelatedPort(self, *args)
def RealizingElement(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_RealizingElement(self, *args)
def SourcePort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_SourcePort(self, *args)
def TargetPort(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_TargetPort(self, *args)
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimConnection_HotWaterFlow_Default.SimConnection__clone(self, f, c)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection
__del__ = lambda self: None
SimConnection_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_swigregister
SimConnection_swigregister(SimConnection)
class SimConnection_HotWaterFlow(SimConnection):
__swig_setmethods__ = {}
for _s in [SimConnection]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection_HotWaterFlow, name, value)
__swig_getmethods__ = {}
for _s in [SimConnection]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection_HotWaterFlow, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection_HotWaterFlow(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow__clone(self, f, c)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection_HotWaterFlow
__del__ = lambda self: None
SimConnection_HotWaterFlow_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_swigregister
SimConnection_HotWaterFlow_swigregister(SimConnection_HotWaterFlow)
class SimConnection_HotWaterFlow_Default(SimConnection_HotWaterFlow):
__swig_setmethods__ = {}
for _s in [SimConnection_HotWaterFlow]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection_HotWaterFlow_Default, name, value)
__swig_getmethods__ = {}
for _s in [SimConnection_HotWaterFlow]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection_HotWaterFlow_Default, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection_HotWaterFlow_Default(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default__clone(self, f, c)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection_HotWaterFlow_Default
__del__ = lambda self: None
SimConnection_HotWaterFlow_Default_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_swigregister
SimConnection_HotWaterFlow_Default_swigregister(SimConnection_HotWaterFlow_Default)
class SimConnection_HotWaterFlow_Default_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimConnection_HotWaterFlow_Default_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimConnection_HotWaterFlow_Default_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimConnection_HotWaterFlow_Default.new_SimConnection_HotWaterFlow_Default_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_assign(self, n, x)
def begin(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_begin(self, *args)
def end(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_end(self, *args)
def rbegin(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_rend(self, *args)
def at(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_at(self, *args)
def front(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_front(self, *args)
def back(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_back(self, *args)
def push_back(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_push_back(self, *args)
def pop_back(self):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_insert(self, *args)
def erase(self, *args):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_swap(self, x)
__swig_destroy__ = _SimConnection_HotWaterFlow_Default.delete_SimConnection_HotWaterFlow_Default_sequence
__del__ = lambda self: None
SimConnection_HotWaterFlow_Default_sequence_swigregister = _SimConnection_HotWaterFlow_Default.SimConnection_HotWaterFlow_Default_sequence_swigregister
SimConnection_HotWaterFlow_Default_sequence_swigregister(SimConnection_HotWaterFlow_Default_sequence)
# This file is compatible with both classic and new-style classes.
| en | 0.890954 | # This file was automatically generated by SWIG (http://www.swig.org). # Version 3.0.7 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. # Python < 2.2 doesn't have 'property'. # This file is compatible with both classic and new-style classes. | 2.041446 | 2 |
02/02-1.py | pak21/aoc2017 | 0 | 6632080 | #!/usr/bin/python3
import sys
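# Advent of Code 2017, day 2: the spreadsheet checksum is the sum, over every
# row of the input file, of the difference between that row's largest and
# smallest values.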
with open(sys.argv[1]) as input_file:
total = 0
for line in input_file:
number_strings = line.rstrip().split()
numbers = [int(s) for s in number_strings]
min_value = min(numbers)
max_value = max(numbers)
difference = max_value - min_value
total = total + difference
print(total)
| #!/usr/bin/python3
import sys
with open(sys.argv[1]) as input_file:
total = 0
for line in input_file:
number_strings = line.rstrip().split()
numbers = [int(s) for s in number_strings]
min_value = min(numbers)
max_value = max(numbers)
difference = max_value - min_value
total = total + difference
print(total)
| fr | 0.386793 | #!/usr/bin/python3 | 3.539333 | 4 |
mergify_engine/constants.py | truthiswill/mergify-engine | 266 | 6632081 | <gh_stars>100-1000
# -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
SUMMARY_NAME = "Summary"
MERGE_QUEUE_BRANCH_PREFIX = "mergify/merge-queue"
MERGE_QUEUE_SUMMARY_NAME = "Queue: Embarked in merge train"
CHECKS_TIMEOUT_CONDITION_LABEL = "checks-are-on-time"
MERGIFY_OPENSOURCE_SPONSOR_DOC = (
"<hr />\n"
":sparkling_heart: Mergify is proud to provide this service "
"for free to open source projects.\n\n"
":rocket: You can help us by [becoming a sponsor](/sponsors/Mergifyio)!\n"
)
MERGIFY_MERGE_QUEUE_PULL_REQUEST_DOC = """
More information about the Mergify merge queue can be found in the [documentation](https://docs.mergify.com/actions/queue.html).
<details>
<summary>Mergify commands</summary>
<br />
You can also trigger Mergify actions by commenting on this pull request:
- `@Mergifyio refresh` will re-evaluate the queue rules
Additionally, on Mergify [dashboard](https://dashboard.mergify.com) you can:
- look at your merge queues
- generate the Mergify configuration with the config editor.
Finally, you can contact us on https://mergify.com
</details>
"""
MERGIFY_PULL_REQUEST_DOC = """
<details>
<summary>Mergify commands and options</summary>
<br />
More conditions and actions can be found in the [documentation](https://docs.mergify.com/).
You can also trigger Mergify actions by commenting on this pull request:
- `@Mergifyio refresh` will re-evaluate the rules
- `@Mergifyio rebase` will rebase this PR on its base branch
- `@Mergifyio update` will merge the base branch into this PR
- `@Mergifyio backport <destination>` will backport this PR on `<destination>` branch
Additionally, on Mergify [dashboard](https://dashboard.mergify.com/) you can:
- look at your merge queues
- generate the Mergify configuration with the config editor.
Finally, you can contact us on https://mergify.com
</details>
"""
| # -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
SUMMARY_NAME = "Summary"
MERGE_QUEUE_BRANCH_PREFIX = "mergify/merge-queue"
MERGE_QUEUE_SUMMARY_NAME = "Queue: Embarked in merge train"
CHECKS_TIMEOUT_CONDITION_LABEL = "checks-are-on-time"
MERGIFY_OPENSOURCE_SPONSOR_DOC = (
"<hr />\n"
":sparkling_heart: Mergify is proud to provide this service "
"for free to open source projects.\n\n"
":rocket: You can help us by [becoming a sponsor](/sponsors/Mergifyio)!\n"
)
MERGIFY_MERGE_QUEUE_PULL_REQUEST_DOC = """
More information about the Mergify merge queue can be found in the [documentation](https://docs.mergify.com/actions/queue.html).
<details>
<summary>Mergify commands</summary>
<br />
You can also trigger Mergify actions by commenting on this pull request:
- `@Mergifyio refresh` will re-evaluate the queue rules
Additionally, on Mergify [dashboard](https://dashboard.mergify.com) you can:
- look at your merge queues
- generate the Mergify configuration with the config editor.
Finally, you can contact us on https://mergify.com
</details>
"""
MERGIFY_PULL_REQUEST_DOC = """
<details>
<summary>Mergify commands and options</summary>
<br />
More conditions and actions can be found in the [documentation](https://docs.mergify.com/).
You can also trigger Mergify actions by commenting on this pull request:
- `@Mergifyio refresh` will re-evaluate the rules
- `@Mergifyio rebase` will rebase this PR on its base branch
- `@Mergifyio update` will merge the base branch into this PR
- `@Mergifyio backport <destination>` will backport this PR on `<destination>` branch
Additionally, on Mergify [dashboard](https://dashboard.mergify.com/) you can:
- look at your merge queues
- generate the Mergify configuration with the config editor.
Finally, you can contact us on https://mergify.com
</details>
""" | en | 0.802157 | # -*- encoding: utf-8 -*- # # Copyright © 2020 Mergify SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. More informations about Mergify merge queue can be found in the [documentation](https://docs.mergify.com/actions/queue.html). <details> <summary>Mergify commands</summary> <br /> You can also trigger Mergify actions by commenting on this pull request: - `@Mergifyio refresh` will re-evaluate the queue rules Additionally, on Mergify [dashboard](https://dashboard.mergify.com) you can: - look at your merge queues - generate the Mergify configuration with the config editor. Finally, you can contact us on https://mergify.com </details> <details> <summary>Mergify commands and options</summary> <br /> More conditions and actions can be found in the [documentation](https://docs.mergify.com/). You can also trigger Mergify actions by commenting on this pull request: - `@Mergifyio refresh` will re-evaluate the rules - `@Mergifyio rebase` will rebase this PR on its base branch - `@Mergifyio update` will merge the base branch into this PR - `@Mergifyio backport <destination>` will backport this PR on `<destination>` branch Additionally, on Mergify [dashboard](https://dashboard.mergify.com/) you can: - look at your merge queues - generate the Mergify configuration with the config editor. Finally, you can contact us on https://mergify.com </details> | 1.354154 | 1 |
src/lib/nets/volumetric/unet3p3d.py | charzharr/Hierarchical-Contrastive-Pretraining | 0 | 6632082 | <reponame>charzharr/Hierarchical-Contrastive-Pretraining<filename>src/lib/nets/volumetric/unet3p3d.py
import torch
from torch import nn
import torch.nn.functional as F
from ..basemodel import BaseModel
class _EncBlk_3d(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False):
super(_EncBlk_3d, self).__init__()
layers = [
nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True),
nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True),
]
if dropout:
layers.append(nn.Dropout(p=0.5))
layers.append(nn.MaxPool3d(kernel_size=2, stride=2))
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class _Blk_3d(nn.Module):
def __init__(self, in_channels, middle_channels):
super(_Blk_3d, self).__init__()
self.decode = nn.Sequential(
nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv3d(middle_channels, middle_channels, kernel_size=3,
padding=1, bias=False),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.decode(x)
# class _DecBlk_3d(nn.Module):
# def __init__(self, in_channels, middle_channels, out_channels):
# super(_DecBlk_3d, self).__init__()
# self.decode = nn.Sequential(
# nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
# nn.BatchNorm3d(middle_channels),
# nn.ReLU(inplace=True),
# nn.Conv3d(middle_channels, middle_channels, kernel_size=3, padding=1),
# nn.BatchNorm3d(middle_channels),
# nn.ReLU(inplace=True),
# nn.ConvTranspose3d(middle_channels, out_channels, kernel_size=2, stride=2),
# )
# def forward(self, x):
# return self.decode(x)
class UNet3plus(BaseModel):
def __init__(self, in_channels, num_classes, ini_channels, deep_sup=False):
self.deep_sup = deep_sup
super(UNet3plus, self).__init__()
self.enc1 = _EncBlk_3d(in_channels, ini_channels)
self.enc2 = _EncBlk_3d(ini_channels, 2*ini_channels)
self.enc3 = _EncBlk_3d(2*ini_channels, 4*ini_channels) # 8*ini_channels
self.enc4 = _EncBlk_3d(4*ini_channels, 8*ini_channels, dropout=True)
self.center = _Blk_3d(8*ini_channels, 16*ini_channels)
self.center_up = nn.ConvTranspose3d(16*ini_channels, 8*ini_channels,
kernel_size=4, stride=2, padding=1)
dec4_in_dims = (16 + 1 + 2 + 4) * ini_channels
self.dec4 = _Blk_3d(dec4_in_dims, 8*ini_channels)
self.dec4_up = nn.ConvTranspose3d(8*ini_channels, 4*ini_channels,
kernel_size=4, stride=2, padding=1)
dec3_in_dims = (8 + 1 + 2 + 16) * ini_channels
self.dec3 = _Blk_3d(dec3_in_dims, 4*ini_channels)
self.dec3_up = nn.ConvTranspose3d(4*ini_channels, 2*ini_channels,
kernel_size=4, stride=2, padding=1)
dec2_in_dims = (4 + 1 + 16 + 8) * ini_channels
self.dec2 = _Blk_3d(dec2_in_dims, 2*ini_channels)
self.dec2_up = nn.ConvTranspose3d(2*ini_channels, ini_channels,
kernel_size=4, stride=2, padding=1)
dec1_in_dims = (2 + 16 + 8 + 4) * ini_channels
self.dec1 = nn.Sequential(
nn.Conv3d(dec1_in_dims, ini_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(ini_channels),
nn.ReLU(inplace=True),
nn.Conv3d(ini_channels, ini_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(ini_channels),
nn.ReLU(inplace=True),
)
self.final = nn.Conv3d(ini_channels, num_classes, kernel_size=1)
self.max_pool = nn.MaxPool3d(kernel_size=2, stride=2)
# deep supervisions
if self.deep_sup:
self.center_ds = nn.Sequential(
nn.Conv3d(16*ini_channels, num_classes, kernel_size=1)
)
self.dec4_ds = nn.Sequential(
nn.Conv3d(8*ini_channels, num_classes, kernel_size=1)
)
self.dec3_ds = nn.Sequential(
nn.Conv3d(4*ini_channels, num_classes, kernel_size=1)
)
self.dec2_ds = nn.Sequential(
nn.Conv3d(2*ini_channels, num_classes, kernel_size=1)
)
tot_params, tot_tparams = self.param_counts
        print(f'💠 UNet3p-3D model initialized with n_classes={num_classes}, '
f'(deep_sup={deep_sup})\n'
f' n_input={in_channels}, ini_chans={ini_channels}\n'
f' params={tot_params:,}, trainable_params={tot_tparams:,}.')
def forward(self, x):
# Encoding stage
enc1 = self.enc1(x)
enc1_pool1 = self.max_pool(enc1)
enc1_pool2 = self.max_pool(enc1_pool1)
enc1_pool3 = self.max_pool(enc1_pool2)
enc2 = self.enc2(enc1)
enc2_pool1 = self.max_pool(enc2)
enc2_pool2 = self.max_pool(enc2_pool1)
enc3 = self.enc3(enc2)
enc3_pool1 = self.max_pool(enc3)
enc4 = self.enc4(enc3)
center = self.center(enc4)
# Decoding stage
center_up = self.center_up(center)
center_dense_up2 = F.interpolate(center, scale_factor=4.0,
mode='trilinear', align_corners=True)
center_dense_up3 = F.interpolate(center, scale_factor=8.0,
mode='trilinear', align_corners=True)
center_dense_up4 = F.interpolate(center, scale_factor=16.0,
mode='trilinear', align_corners=True)
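        # UNet3+-style full-scale skip connections: each decoder stage below
        # concatenates the up-sampled deeper feature map with resized features
        # from every other scale (encoder, center and earlier decoder outputs)
        # before passing the stack through its conv block.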
dec4 = self.dec4(torch.cat([
center_up,
F.interpolate(enc4, center_up.size()[2:], mode='trilinear',
align_corners=False),
F.interpolate(enc1_pool3, center_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc2_pool2, center_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc3_pool1, center_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
dec4_dense_up2 = F.interpolate(dec4, scale_factor=4.0, mode='trilinear',
align_corners=True)
dec4_dense_up3 = F.interpolate(dec4, scale_factor=8.0, mode='trilinear',
align_corners=True)
dec4_up = self.dec4_up(dec4)
dec3 = self.dec3(torch.cat([
dec4_up,
F.interpolate(enc3, dec4_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc1_pool2, dec4_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc2_pool1, dec4_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(center_dense_up2, dec4_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
dec3_dense_up2 = F.interpolate(dec3, scale_factor=4.0, mode='trilinear',
align_corners=True)
dec3_up = self.dec3_up(dec3)
dec2 = self.dec2(torch.cat([
dec3_up,
F.interpolate(enc2, dec3_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc1_pool1, dec3_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(center_dense_up3, dec3_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(dec4_dense_up2, dec3_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
dec2_up = self.dec2_up(dec2)
dec1 = self.dec1(torch.cat([
dec2_up,
F.interpolate(enc1, dec2_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(center_dense_up4, dec2_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(dec4_dense_up3, dec2_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(dec3_dense_up2, dec2_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
final = self.final(dec1)
out = F.interpolate(final, x.size()[2:], mode='trilinear',
align_corners=False)
# deep supervisions: conv + trilinear up-sampling
if self.deep_sup:
center_ds = self.center_ds(center)
center_ds = F.interpolate(center_ds, scale_factor=16.0,
mode='trilinear', align_corners=True)
dec4_ds = self.dec4_ds(dec4)
dec4_ds = F.interpolate(dec4_ds, scale_factor=8.0,
mode='trilinear', align_corners=True)
dec3_ds = self.dec3_ds(dec3)
dec3_ds = F.interpolate(dec3_ds, scale_factor=4.0,
mode='trilinear', align_corners=True)
dec2_ds = self.dec2_ds(dec2)
dec2_ds = F.interpolate(dec2_ds, scale_factor=2.0,
mode='trilinear', align_corners=True)
return {
'out': out,
'2x': dec2_ds,
'4x': dec3_ds,
'8x': dec4_ds,
'16x': center_ds
}
return {'out': out}
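# Illustrative usage sketch (editor addition, not part of the original module):
# a minimal smoke test of the expected input/output shapes. It assumes the
# relative `base`/`BaseModel` imports above resolve in the package context and
# that the spatial size is divisible by 16 (the encoder pools four times); the
# channel counts below are arbitrary example values.
if __name__ == '__main__':
    model = UNet3plus(in_channels=1, num_classes=3, ini_channels=16, deep_sup=True)
    model.eval()
    with torch.no_grad():
        outputs = model(torch.randn(1, 1, 64, 64, 64))
    # 'out' matches the input resolution; the deep-supervision heads
    # ('2x', '4x', '8x', '16x') are likewise up-sampled back to full resolution.
    print({key: tuple(value.shape) for key, value in outputs.items()})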
| import torch
from torch import nn
import torch.nn.functional as F
from ..basemodel import BaseModel
class _EncBlk_3d(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False):
super(_EncBlk_3d, self).__init__()
layers = [
nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True),
nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True),
]
if dropout:
layers.append(nn.Dropout(p=0.5))
layers.append(nn.MaxPool3d(kernel_size=2, stride=2))
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class _Blk_3d(nn.Module):
def __init__(self, in_channels, middle_channels):
super(_Blk_3d, self).__init__()
self.decode = nn.Sequential(
nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv3d(middle_channels, middle_channels, kernel_size=3,
padding=1, bias=False),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.decode(x)
# class _DecBlk_3d(nn.Module):
# def __init__(self, in_channels, middle_channels, out_channels):
# super(_DecBlk_3d, self).__init__()
# self.decode = nn.Sequential(
# nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
# nn.BatchNorm3d(middle_channels),
# nn.ReLU(inplace=True),
# nn.Conv3d(middle_channels, middle_channels, kernel_size=3, padding=1),
# nn.BatchNorm3d(middle_channels),
# nn.ReLU(inplace=True),
# nn.ConvTranspose3d(middle_channels, out_channels, kernel_size=2, stride=2),
# )
# def forward(self, x):
# return self.decode(x)
class UNet3plus(BaseModel):
def __init__(self, in_channels, num_classes, ini_channels, deep_sup=False):
self.deep_sup = deep_sup
super(UNet3plus, self).__init__()
self.enc1 = _EncBlk_3d(in_channels, ini_channels)
self.enc2 = _EncBlk_3d(ini_channels, 2*ini_channels)
self.enc3 = _EncBlk_3d(2*ini_channels, 4*ini_channels) # 8*ini_channels
self.enc4 = _EncBlk_3d(4*ini_channels, 8*ini_channels, dropout=True)
self.center = _Blk_3d(8*ini_channels, 16*ini_channels)
self.center_up = nn.ConvTranspose3d(16*ini_channels, 8*ini_channels,
kernel_size=4, stride=2, padding=1)
dec4_in_dims = (16 + 1 + 2 + 4) * ini_channels
self.dec4 = _Blk_3d(dec4_in_dims, 8*ini_channels)
self.dec4_up = nn.ConvTranspose3d(8*ini_channels, 4*ini_channels,
kernel_size=4, stride=2, padding=1)
dec3_in_dims = (8 + 1 + 2 + 16) * ini_channels
self.dec3 = _Blk_3d(dec3_in_dims, 4*ini_channels)
self.dec3_up = nn.ConvTranspose3d(4*ini_channels, 2*ini_channels,
kernel_size=4, stride=2, padding=1)
dec2_in_dims = (4 + 1 + 16 + 8) * ini_channels
self.dec2 = _Blk_3d(dec2_in_dims, 2*ini_channels)
self.dec2_up = nn.ConvTranspose3d(2*ini_channels, ini_channels,
kernel_size=4, stride=2, padding=1)
dec1_in_dims = (2 + 16 + 8 + 4) * ini_channels
self.dec1 = nn.Sequential(
nn.Conv3d(dec1_in_dims, ini_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(ini_channels),
nn.ReLU(inplace=True),
nn.Conv3d(ini_channels, ini_channels, kernel_size=3, padding=1,
bias=False),
nn.BatchNorm3d(ini_channels),
nn.ReLU(inplace=True),
)
self.final = nn.Conv3d(ini_channels, num_classes, kernel_size=1)
self.max_pool = nn.MaxPool3d(kernel_size=2, stride=2)
# deep supervisions
if self.deep_sup:
self.center_ds = nn.Sequential(
nn.Conv3d(16*ini_channels, num_classes, kernel_size=1)
)
self.dec4_ds = nn.Sequential(
nn.Conv3d(8*ini_channels, num_classes, kernel_size=1)
)
self.dec3_ds = nn.Sequential(
nn.Conv3d(4*ini_channels, num_classes, kernel_size=1)
)
self.dec2_ds = nn.Sequential(
nn.Conv3d(2*ini_channels, num_classes, kernel_size=1)
)
tot_params, tot_tparams = self.param_counts
        print(f'💠 UNet3p-3D model initialized with n_classes={num_classes}, '
f'(deep_sup={deep_sup})\n'
f' n_input={in_channels}, ini_chans={ini_channels}\n'
f' params={tot_params:,}, trainable_params={tot_tparams:,}.')
def forward(self, x):
# Encoding stage
enc1 = self.enc1(x)
enc1_pool1 = self.max_pool(enc1)
enc1_pool2 = self.max_pool(enc1_pool1)
enc1_pool3 = self.max_pool(enc1_pool2)
enc2 = self.enc2(enc1)
enc2_pool1 = self.max_pool(enc2)
enc2_pool2 = self.max_pool(enc2_pool1)
enc3 = self.enc3(enc2)
enc3_pool1 = self.max_pool(enc3)
enc4 = self.enc4(enc3)
center = self.center(enc4)
# Decoding stage
center_up = self.center_up(center)
center_dense_up2 = F.interpolate(center, scale_factor=4.0,
mode='trilinear', align_corners=True)
center_dense_up3 = F.interpolate(center, scale_factor=8.0,
mode='trilinear', align_corners=True)
center_dense_up4 = F.interpolate(center, scale_factor=16.0,
mode='trilinear', align_corners=True)
dec4 = self.dec4(torch.cat([
center_up,
F.interpolate(enc4, center_up.size()[2:], mode='trilinear',
align_corners=False),
F.interpolate(enc1_pool3, center_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc2_pool2, center_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc3_pool1, center_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
dec4_dense_up2 = F.interpolate(dec4, scale_factor=4.0, mode='trilinear',
align_corners=True)
dec4_dense_up3 = F.interpolate(dec4, scale_factor=8.0, mode='trilinear',
align_corners=True)
dec4_up = self.dec4_up(dec4)
dec3 = self.dec3(torch.cat([
dec4_up,
F.interpolate(enc3, dec4_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc1_pool2, dec4_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc2_pool1, dec4_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(center_dense_up2, dec4_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
dec3_dense_up2 = F.interpolate(dec3, scale_factor=4.0, mode='trilinear',
align_corners=True)
dec3_up = self.dec3_up(dec3)
dec2 = self.dec2(torch.cat([
dec3_up,
F.interpolate(enc2, dec3_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(enc1_pool1, dec3_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(center_dense_up3, dec3_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(dec4_dense_up2, dec3_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
dec2_up = self.dec2_up(dec2)
dec1 = self.dec1(torch.cat([
dec2_up,
F.interpolate(enc1, dec2_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(center_dense_up4, dec2_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(dec4_dense_up3, dec2_up.size()[2:],
mode='trilinear', align_corners=False),
F.interpolate(dec3_dense_up2, dec2_up.size()[2:],
mode='trilinear', align_corners=False)
], 1))
final = self.final(dec1)
out = F.interpolate(final, x.size()[2:], mode='trilinear',
align_corners=False)
# deep supervisions: conv + trilinear up-sampling
if self.deep_sup:
center_ds = self.center_ds(center)
center_ds = F.interpolate(center_ds, scale_factor=16.0,
mode='trilinear', align_corners=True)
dec4_ds = self.dec4_ds(dec4)
dec4_ds = F.interpolate(dec4_ds, scale_factor=8.0,
mode='trilinear', align_corners=True)
dec3_ds = self.dec3_ds(dec3)
dec3_ds = F.interpolate(dec3_ds, scale_factor=4.0,
mode='trilinear', align_corners=True)
dec2_ds = self.dec2_ds(dec2)
dec2_ds = F.interpolate(dec2_ds, scale_factor=2.0,
mode='trilinear', align_corners=True)
return {
'out': out,
'2x': dec2_ds,
'4x': dec3_ds,
'8x': dec4_ds,
'16x': center_ds
}
return {'out': out} | en | 0.520822 | # class _DecBlk_3d(nn.Module): # def __init__(self, in_channels, middle_channels, out_channels): # super(_DecBlk_3d, self).__init__() # self.decode = nn.Sequential( # nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1), # nn.BatchNorm3d(middle_channels), # nn.ReLU(inplace=True), # nn.Conv3d(middle_channels, middle_channels, kernel_size=3, padding=1), # nn.BatchNorm3d(middle_channels), # nn.ReLU(inplace=True), # nn.ConvTranspose3d(middle_channels, out_channels, kernel_size=2, stride=2), # ) # def forward(self, x): # return self.decode(x) # 8*ini_channels # deep supervisions # Encoding stage # Decoding stage # deep supervisions: conv + trilinear up-sampling | 2.394014 | 2 |
cohere/src_py/utilities/transtest.py | brussel13/cohere | 7 | 6632083 | <filename>cohere/src_py/utilities/transtest.py
import numpy as np
dims=(5,5,5)
dxdir=1
dydir=1
dzdir=1
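# Build a 3 x 5 x 5 x 5 index grid: along the first axis the values run
# 4, 3, ..., 0 (stepping by -dxdir), while the other two axes run 0..4, so each
# entry of the grid carries its own (z, y, x) coordinate triple.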
r = np.mgrid[(dims[0] - 1) * dxdir:-dxdir:-dxdir, \
0:dims[1] * dydir:dydir,\
0:dims[2] * dzdir:dzdir]
origshape=r.shape
r.shape = 3, dims[0] * dims[1] * dims[2]
r = r.transpose()
Tdir = np.array([[0.1, 0, 0], [0, 1, 0], [0, 0, 1]])
print(Tdir)
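# Apply the transform: with Tdir = diag(0.1, 1, 1), the matrix product scales
# the first coordinate of every (z, y, x) triple by 0.1, and the transpose /
# reshape pair restores the original (3, 5, 5, 5) layout; the script appears
# to be a quick sanity check of that round trip.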
dir_coords = np.dot(r, Tdir)
dir_coords = dir_coords.transpose()
dir_coords.shape=origshape
| <filename>cohere/src_py/utilities/transtest.py
import numpy as np
dims=(5,5,5)
dxdir=1
dydir=1
dzdir=1
r = np.mgrid[(dims[0] - 1) * dxdir:-dxdir:-dxdir, \
0:dims[1] * dydir:dydir,\
0:dims[2] * dzdir:dzdir]
origshape=r.shape
r.shape = 3, dims[0] * dims[1] * dims[2]
r = r.transpose()
Tdir = np.array([[0.1, 0, 0], [0, 1, 0], [0, 0, 1]])
print(Tdir)
dir_coords = np.dot(r, Tdir)
dir_coords = dir_coords.transpose()
dir_coords.shape=origshape
| none | 1 | 2.497994 | 2 |
|
amqtt/mqtt/protocol/client_handler.py | nfsnfs/amqtt | 29 | 6632084 | <gh_stars>10-100
# Copyright (c) 2015 <NAME>
#
# See the file license.txt for copying permission.
import asyncio
from asyncio import futures
from amqtt.mqtt.protocol.handler import ProtocolHandler, EVENT_MQTT_PACKET_RECEIVED
from amqtt.mqtt.disconnect import DisconnectPacket
from amqtt.mqtt.pingreq import PingReqPacket
from amqtt.mqtt.pingresp import PingRespPacket
from amqtt.mqtt.subscribe import SubscribePacket
from amqtt.mqtt.suback import SubackPacket
from amqtt.mqtt.unsubscribe import UnsubscribePacket
from amqtt.mqtt.unsuback import UnsubackPacket
from amqtt.mqtt.connect import ConnectVariableHeader, ConnectPayload, ConnectPacket
from amqtt.mqtt.connack import ConnackPacket
from amqtt.session import Session
from amqtt.plugins.manager import PluginManager
class ClientProtocolHandler(ProtocolHandler):
def __init__(
self, plugins_manager: PluginManager, session: Session = None, loop=None
):
super().__init__(plugins_manager, session, loop=loop)
self._ping_task = None
self._pingresp_queue = asyncio.Queue(loop=self._loop)
self._subscriptions_waiter = dict()
self._unsubscriptions_waiter = dict()
self._disconnect_waiter = None
async def start(self):
await super().start()
if self._disconnect_waiter is None:
self._disconnect_waiter = futures.Future(loop=self._loop)
async def stop(self):
await super().stop()
if self._ping_task and not self._ping_task.cancelled():
self.logger.debug("Cancel ping task")
self._ping_task.cancel()
if not self._disconnect_waiter.done():
self._disconnect_waiter.cancel()
self._disconnect_waiter = None
def _build_connect_packet(self):
vh = ConnectVariableHeader()
payload = ConnectPayload()
vh.keep_alive = self.session.keep_alive
vh.clean_session_flag = self.session.clean_session
vh.will_retain_flag = self.session.will_retain
payload.client_id = self.session.client_id
if self.session.username:
vh.username_flag = True
payload.username = self.session.username
else:
vh.username_flag = False
if self.session.password:
vh.password_flag = True
payload.password = self.session.password
else:
vh.password_flag = False
if self.session.will_flag:
vh.will_flag = True
vh.will_qos = self.session.will_qos
payload.will_message = self.session.will_message
payload.will_topic = self.session.will_topic
else:
vh.will_flag = False
packet = ConnectPacket(vh=vh, payload=payload)
return packet
async def mqtt_connect(self):
connect_packet = self._build_connect_packet()
await self._send_packet(connect_packet)
connack = await ConnackPacket.from_stream(self.reader)
await self.plugins_manager.fire_event(
EVENT_MQTT_PACKET_RECEIVED, packet=connack, session=self.session
)
return connack.return_code
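    # Keep-alive handling: when the write side has been idle for the keep-alive
    # interval, handle_write_timeout schedules a single PINGREQ; the matching
    # PINGRESP is delivered through _pingresp_queue and mqtt_ping then clears
    # _ping_task so another ping can be scheduled later.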
def handle_write_timeout(self):
try:
if not self._ping_task:
self.logger.debug("Scheduling Ping")
self._ping_task = asyncio.ensure_future(self.mqtt_ping())
except Exception as e:
self.logger.debug("Exception ignored in ping task: %r" % e)
def handle_read_timeout(self):
pass
async def mqtt_subscribe(self, topics, packet_id):
"""
:param topics: array of topics [{'filter':'/a/b', 'qos': 0x00}, ...]
        :return: list of SUBACK return codes (granted QoS or failure), one per topic
"""
# Build and send SUBSCRIBE message
subscribe = SubscribePacket.build(topics, packet_id)
await self._send_packet(subscribe)
# Wait for SUBACK is received
waiter = futures.Future(loop=self._loop)
self._subscriptions_waiter[subscribe.variable_header.packet_id] = waiter
return_codes = await waiter
del self._subscriptions_waiter[subscribe.variable_header.packet_id]
return return_codes
async def handle_suback(self, suback: SubackPacket):
packet_id = suback.variable_header.packet_id
try:
waiter = self._subscriptions_waiter.get(packet_id)
waiter.set_result(suback.payload.return_codes)
except KeyError:
self.logger.warning(
"Received SUBACK for unknown pending subscription with Id: %s"
% packet_id
)
async def mqtt_unsubscribe(self, topics, packet_id):
"""
:param topics: array of topics ['/a/b', ...]
        :return: None, once the corresponding UNSUBACK has been received
"""
unsubscribe = UnsubscribePacket.build(topics, packet_id)
await self._send_packet(unsubscribe)
waiter = futures.Future(loop=self._loop)
self._unsubscriptions_waiter[unsubscribe.variable_header.packet_id] = waiter
await waiter
del self._unsubscriptions_waiter[unsubscribe.variable_header.packet_id]
async def handle_unsuback(self, unsuback: UnsubackPacket):
packet_id = unsuback.variable_header.packet_id
try:
waiter = self._unsubscriptions_waiter.get(packet_id)
waiter.set_result(None)
except KeyError:
self.logger.warning(
"Received UNSUBACK for unknown pending subscription with Id: %s"
% packet_id
)
async def mqtt_disconnect(self):
disconnect_packet = DisconnectPacket()
await self._send_packet(disconnect_packet)
async def mqtt_ping(self):
ping_packet = PingReqPacket()
await self._send_packet(ping_packet)
resp = await self._pingresp_queue.get()
if self._ping_task:
self._ping_task = None
return resp
async def handle_pingresp(self, pingresp: PingRespPacket):
await self._pingresp_queue.put(pingresp)
async def handle_connection_closed(self):
self.logger.debug("Broker closed connection")
if not self._disconnect_waiter.done():
self._disconnect_waiter.set_result(None)
async def wait_disconnect(self):
await self._disconnect_waiter
| # Copyright (c) 2015 <NAME>
#
# See the file license.txt for copying permission.
import asyncio
from asyncio import futures
from amqtt.mqtt.protocol.handler import ProtocolHandler, EVENT_MQTT_PACKET_RECEIVED
from amqtt.mqtt.disconnect import DisconnectPacket
from amqtt.mqtt.pingreq import PingReqPacket
from amqtt.mqtt.pingresp import PingRespPacket
from amqtt.mqtt.subscribe import SubscribePacket
from amqtt.mqtt.suback import SubackPacket
from amqtt.mqtt.unsubscribe import UnsubscribePacket
from amqtt.mqtt.unsuback import UnsubackPacket
from amqtt.mqtt.connect import ConnectVariableHeader, ConnectPayload, ConnectPacket
from amqtt.mqtt.connack import ConnackPacket
from amqtt.session import Session
from amqtt.plugins.manager import PluginManager
class ClientProtocolHandler(ProtocolHandler):
def __init__(
self, plugins_manager: PluginManager, session: Session = None, loop=None
):
super().__init__(plugins_manager, session, loop=loop)
self._ping_task = None
self._pingresp_queue = asyncio.Queue(loop=self._loop)
self._subscriptions_waiter = dict()
self._unsubscriptions_waiter = dict()
self._disconnect_waiter = None
async def start(self):
await super().start()
if self._disconnect_waiter is None:
self._disconnect_waiter = futures.Future(loop=self._loop)
async def stop(self):
await super().stop()
if self._ping_task and not self._ping_task.cancelled():
self.logger.debug("Cancel ping task")
self._ping_task.cancel()
if not self._disconnect_waiter.done():
self._disconnect_waiter.cancel()
self._disconnect_waiter = None
def _build_connect_packet(self):
vh = ConnectVariableHeader()
payload = ConnectPayload()
vh.keep_alive = self.session.keep_alive
vh.clean_session_flag = self.session.clean_session
vh.will_retain_flag = self.session.will_retain
payload.client_id = self.session.client_id
if self.session.username:
vh.username_flag = True
payload.username = self.session.username
else:
vh.username_flag = False
if self.session.password:
vh.password_flag = True
payload.password = self.session.password
else:
vh.password_flag = False
if self.session.will_flag:
vh.will_flag = True
vh.will_qos = self.session.will_qos
payload.will_message = self.session.will_message
payload.will_topic = self.session.will_topic
else:
vh.will_flag = False
packet = ConnectPacket(vh=vh, payload=payload)
return packet
async def mqtt_connect(self):
connect_packet = self._build_connect_packet()
await self._send_packet(connect_packet)
connack = await ConnackPacket.from_stream(self.reader)
await self.plugins_manager.fire_event(
EVENT_MQTT_PACKET_RECEIVED, packet=connack, session=self.session
)
return connack.return_code
def handle_write_timeout(self):
try:
if not self._ping_task:
self.logger.debug("Scheduling Ping")
self._ping_task = asyncio.ensure_future(self.mqtt_ping())
except Exception as e:
self.logger.debug("Exception ignored in ping task: %r" % e)
def handle_read_timeout(self):
pass
async def mqtt_subscribe(self, topics, packet_id):
"""
:param topics: array of topics [{'filter':'/a/b', 'qos': 0x00}, ...]
        :return: list of SUBACK return codes (granted QoS or failure), one per topic
"""
# Build and send SUBSCRIBE message
subscribe = SubscribePacket.build(topics, packet_id)
await self._send_packet(subscribe)
# Wait for SUBACK is received
waiter = futures.Future(loop=self._loop)
self._subscriptions_waiter[subscribe.variable_header.packet_id] = waiter
return_codes = await waiter
del self._subscriptions_waiter[subscribe.variable_header.packet_id]
return return_codes
async def handle_suback(self, suback: SubackPacket):
packet_id = suback.variable_header.packet_id
try:
waiter = self._subscriptions_waiter.get(packet_id)
waiter.set_result(suback.payload.return_codes)
except KeyError:
self.logger.warning(
"Received SUBACK for unknown pending subscription with Id: %s"
% packet_id
)
async def mqtt_unsubscribe(self, topics, packet_id):
"""
:param topics: array of topics ['/a/b', ...]
        :return: None, once the corresponding UNSUBACK has been received
"""
unsubscribe = UnsubscribePacket.build(topics, packet_id)
await self._send_packet(unsubscribe)
waiter = futures.Future(loop=self._loop)
self._unsubscriptions_waiter[unsubscribe.variable_header.packet_id] = waiter
await waiter
del self._unsubscriptions_waiter[unsubscribe.variable_header.packet_id]
async def handle_unsuback(self, unsuback: UnsubackPacket):
packet_id = unsuback.variable_header.packet_id
try:
waiter = self._unsubscriptions_waiter.get(packet_id)
waiter.set_result(None)
except KeyError:
self.logger.warning(
"Received UNSUBACK for unknown pending subscription with Id: %s"
% packet_id
)
async def mqtt_disconnect(self):
disconnect_packet = DisconnectPacket()
await self._send_packet(disconnect_packet)
async def mqtt_ping(self):
ping_packet = PingReqPacket()
await self._send_packet(ping_packet)
resp = await self._pingresp_queue.get()
if self._ping_task:
self._ping_task = None
return resp
async def handle_pingresp(self, pingresp: PingRespPacket):
await self._pingresp_queue.put(pingresp)
async def handle_connection_closed(self):
self.logger.debug("Broker closed connection")
if not self._disconnect_waiter.done():
self._disconnect_waiter.set_result(None)
async def wait_disconnect(self):
await self._disconnect_waiter | en | 0.742393 | # Copyright (c) 2015 <NAME> # # See the file license.txt for copying permission. :param topics: array of topics [{'filter':'/a/b', 'qos': 0x00}, ...] :return: # Build and send SUBSCRIBE message # Wait for SUBACK is received :param topics: array of topics ['/a/b', ...] :return: | 1.919793 | 2 |
tripleoclient/tests/v1/test_overcloud_export.py | d34dh0r53/python-tripleoclient | 0 | 6632085 | <reponame>d34dh0r53/python-tripleoclient
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from osc_lib.tests import utils
from tripleoclient.v1 import overcloud_export
class TestOvercloudExport(utils.TestCommand):
def setUp(self):
super(TestOvercloudExport, self).setUp()
self.cmd = overcloud_export.ExportOvercloud(self.app, None)
self.app.client_manager.orchestration = mock.Mock()
self.tripleoclient = mock.Mock()
self.app.client_manager.tripleoclient = self.tripleoclient
self.app.client_manager.tripleoclient.object_store = mock.Mock()
self.mock_open = mock.mock_open()
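    # Each test below patches the export helpers and asserts that they receive
    # the stack name, config-download directory and password-exclude flag
    # implied by the parsed CLI arguments.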
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump,
mock_exists):
argslist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
mock_export_passwords.return_value = {'key': 'value'}
mock_export_stack.return_value = {'key0': 'value0'}
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'overcloud', True)
path = os.path.join(os.environ.get('HOME'),
'config-download/overcloud')
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'overcloud',
False,
path)
self.assertEqual(
{'parameter_defaults': {'key': 'value',
'key0': 'value0'}},
mock_safe_dump.call_args[0][0])
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export_stack_name(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump,
mock_exists):
argslist = ['--stack', 'foo']
verifylist = [('stack', 'foo')]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'foo', True)
path = os.path.join(os.environ.get('HOME'),
'config-download/foo')
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'foo',
False,
path)
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export_stack_name_and_dir(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump, mock_exists):
argslist = ['--stack', 'foo',
'--config-download-dir', '/tmp/bar']
verifylist = [('stack', 'foo'),
('config_download_dir', '/tmp/bar')]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'foo', True)
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'foo',
False,
'/tmp/bar')
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export_no_excludes(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump, mock_exists):
argslist = ['--stack', 'foo',
'--config-download-dir', '/tmp/bar',
'--no-password-excludes']
verifylist = [('stack', 'foo'),
('config_download_dir', '/tmp/bar'),
('no_password_excludes', True)]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'foo', False)
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'foo',
False,
'/tmp/bar')
| # Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from osc_lib.tests import utils
from tripleoclient.v1 import overcloud_export
class TestOvercloudExport(utils.TestCommand):
def setUp(self):
super(TestOvercloudExport, self).setUp()
self.cmd = overcloud_export.ExportOvercloud(self.app, None)
self.app.client_manager.orchestration = mock.Mock()
self.tripleoclient = mock.Mock()
self.app.client_manager.tripleoclient = self.tripleoclient
self.app.client_manager.tripleoclient.object_store = mock.Mock()
self.mock_open = mock.mock_open()
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump,
mock_exists):
argslist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
mock_export_passwords.return_value = {'key': 'value'}
mock_export_stack.return_value = {'key0': 'value0'}
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'overcloud', True)
path = os.path.join(os.environ.get('HOME'),
'config-download/overcloud')
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'overcloud',
False,
path)
self.assertEqual(
{'parameter_defaults': {'key': 'value',
'key0': 'value0'}},
mock_safe_dump.call_args[0][0])
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export_stack_name(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump,
mock_exists):
argslist = ['--stack', 'foo']
verifylist = [('stack', 'foo')]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'foo', True)
path = os.path.join(os.environ.get('HOME'),
'config-download/foo')
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'foo',
False,
path)
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export_stack_name_and_dir(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump, mock_exists):
argslist = ['--stack', 'foo',
'--config-download-dir', '/tmp/bar']
verifylist = [('stack', 'foo'),
('config_download_dir', '/tmp/bar')]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'foo', True)
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'foo',
False,
'/tmp/bar')
@mock.patch('os.path.exists')
@mock.patch('yaml.safe_dump')
@mock.patch('tripleoclient.export.export_stack')
@mock.patch('tripleoclient.export.export_passwords')
def test_export_no_excludes(self, mock_export_passwords,
mock_export_stack,
mock_safe_dump, mock_exists):
argslist = ['--stack', 'foo',
'--config-download-dir', '/tmp/bar',
'--no-password-excludes']
verifylist = [('stack', 'foo'),
('config_download_dir', '/tmp/bar'),
('no_password_excludes', True)]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
mock_exists.return_value = False
with mock.patch('six.moves.builtins.open', self.mock_open):
self.cmd.take_action(parsed_args)
mock_export_passwords.assert_called_once_with(
self.app.client_manager.tripleoclient.object_store,
'foo', False)
mock_export_stack.assert_called_once_with(
self.app.client_manager.orchestration,
'foo',
False,
'/tmp/bar') | en | 0.851995 | # Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.775921 | 2 |
tests/test_3_nii_dicomseg_conversion.py | deepc-health/nekton | 0 | 6632086 | <reponame>deepc-health/nekton
import pytest
import os
import glob
import nibabel as nib
import pydicom
from pydicom.dataset import Dataset
from pydicom_seg.segmentation_dataset import SegmentationDataset
from nekton.utils.json_helpers import write_json
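# These tests exercise the NIfTI-to-DICOM-SEG conversion path: loading the
# segment-mapping JSON, validating DICOM/segmentation consistency, building a
# single DICOM-SEG dataset, and running the multiclass converter in single- and
# multi-layer mode against pydicom's bundled CT test series.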
@pytest.mark.nii2dcmseg
def test_3_1_check_loading_segmapping(converter_dcmseg):
converter = converter_dcmseg
# load the correct mapping
mapping = converter._load_segmap("tests/test_data/sample_segmentation/mapping.json")
assert type(mapping) == Dataset
# no valid seg path
with pytest.raises(AssertionError):
converter._load_segmap("path/not/exist.json")
# nonstd json
dict_data = {"key1": 1, "key2": "a"}
nonstd_json = write_json(dict_data, "./test.json")
with pytest.raises(TypeError):
converter._load_segmap(nonstd_json)
os.remove("./test.json")
@pytest.mark.nii2dcmseg
def test_3_2_check_check_all_dicoms(converter_dcmseg, site_package_path):
converter = converter_dcmseg
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcms = [path for path in glob.glob(dir_dcms) if ".json" not in path]
seg = nib.load(
"tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
).get_fdata()
sorted_dicom = converter._check_all_dicoms(path_dcms, seg)
assert len(sorted_dicom) == len(path_dcms)
# when the number of dicoms is a mismatch
# removing the first dicom path from the list
with pytest.raises(AssertionError):
converter._check_all_dicoms(path_dcms[1:], seg)
@pytest.mark.nii2dcmseg
def test_3_3_check_create_dicomseg(site_package_path, converter_dcmseg):
mapping = converter_dcmseg._load_segmap(
"tests/test_data/sample_segmentation/mapping.json"
)
seg = nib.load(
"tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
).get_fdata()[..., -2:-1]
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcm = [path for path in glob.glob(dir_dcms) if ".json" not in path][0]
dcm_ds = pydicom.dcmread(path_dcm)
out_ds = converter_dcmseg._create_dicomseg(mapping, seg, dcm_ds)
assert type(out_ds) is SegmentationDataset
assert dcm_ds.AcquisitionTime == out_ds.AcquisitionTime
@pytest.mark.nii2dcmseg
def test_3_4_check_multilabel_converter(converter_dcmseg):
with pytest.raises(NotImplementedError):
converter_dcmseg.multilabel_converter([], [])
@pytest.mark.nii2dcmseg
def test_3_7_check_end2end_multiclass_singlelayer_converter(
site_package_path, converter_dcmseg
):
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcms = [path for path in glob.glob(dir_dcms) if ".json" not in path]
path_mapping = "tests/test_data/sample_segmentation/mapping.json"
path_seg_nifti = "tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
dcmsegs = converter_dcmseg.multiclass_converter(
path_seg_nifti, path_mapping, path_dcms
)
assert len(dcmsegs) == 4
for dcmseg in dcmsegs:
assert os.path.exists(dcmseg)
os.remove(dcmseg)
@pytest.mark.nii2dcmseg
def test_3_5_check_end2end_multiclass_multilayer_converter(
site_package_path, converter_dcmseg
):
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcms = [path for path in glob.glob(dir_dcms) if ".json" not in path]
path_mapping = "tests/test_data/sample_segmentation/mapping.json"
path_seg_nifti = "tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
dcmsegs = converter_dcmseg.multiclass_converter(
path_seg_nifti, path_mapping, path_dcms, multiLayer=True
)
assert len(dcmsegs) == 1
for dcmseg in dcmsegs:
assert os.path.exists(dcmseg)
os.remove(dcmseg)
@pytest.mark.nii2dcmseg
def test_3_6_check_check_all_labels(converter_dcmseg):
mapping = converter_dcmseg._load_segmap(
"tests/test_data/sample_segmentation/mapping.json"
)
fake_mapping = converter_dcmseg._load_segmap(
"tests/test_data/sample_segmentation/fake_mapping.json"
)
seg = nib.load(
"tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
).get_fdata()
with pytest.raises(ValueError):
converter_dcmseg._check_all_lables(fake_mapping, seg)
converter_dcmseg._check_all_lables(mapping, seg)
| import pytest
import os
import glob
import nibabel as nib
import pydicom
from pydicom.dataset import Dataset
from pydicom_seg.segmentation_dataset import SegmentationDataset
from nekton.utils.json_helpers import write_json
@pytest.mark.nii2dcmseg
def test_3_1_check_loading_segmapping(converter_dcmseg):
converter = converter_dcmseg
# load the correct mapping
mapping = converter._load_segmap("tests/test_data/sample_segmentation/mapping.json")
assert type(mapping) == Dataset
# no valid seg path
with pytest.raises(AssertionError):
converter._load_segmap("path/not/exist.json")
# nonstd json
dict_data = {"key1": 1, "key2": "a"}
nonstd_json = write_json(dict_data, "./test.json")
with pytest.raises(TypeError):
converter._load_segmap(nonstd_json)
os.remove("./test.json")
@pytest.mark.nii2dcmseg
def test_3_2_check_check_all_dicoms(converter_dcmseg, site_package_path):
converter = converter_dcmseg
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcms = [path for path in glob.glob(dir_dcms) if ".json" not in path]
seg = nib.load(
"tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
).get_fdata()
sorted_dicom = converter._check_all_dicoms(path_dcms, seg)
assert len(sorted_dicom) == len(path_dcms)
# when the number of dicoms is a mismatch
# removing the first dicom path from the list
with pytest.raises(AssertionError):
converter._check_all_dicoms(path_dcms[1:], seg)
@pytest.mark.nii2dcmseg
def test_3_3_check_create_dicomseg(site_package_path, converter_dcmseg):
mapping = converter_dcmseg._load_segmap(
"tests/test_data/sample_segmentation/mapping.json"
)
seg = nib.load(
"tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
).get_fdata()[..., -2:-1]
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcm = [path for path in glob.glob(dir_dcms) if ".json" not in path][0]
dcm_ds = pydicom.dcmread(path_dcm)
out_ds = converter_dcmseg._create_dicomseg(mapping, seg, dcm_ds)
assert type(out_ds) is SegmentationDataset
assert dcm_ds.AcquisitionTime == out_ds.AcquisitionTime
@pytest.mark.nii2dcmseg
def test_3_4_check_multilabel_converter(converter_dcmseg):
with pytest.raises(NotImplementedError):
converter_dcmseg.multilabel_converter([], [])
@pytest.mark.nii2dcmseg
def test_3_7_check_end2end_multiclass_singlelayer_converter(
site_package_path, converter_dcmseg
):
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcms = [path for path in glob.glob(dir_dcms) if ".json" not in path]
path_mapping = "tests/test_data/sample_segmentation/mapping.json"
path_seg_nifti = "tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
dcmsegs = converter_dcmseg.multiclass_converter(
path_seg_nifti, path_mapping, path_dcms
)
assert len(dcmsegs) == 4
for dcmseg in dcmsegs:
assert os.path.exists(dcmseg)
os.remove(dcmseg)
@pytest.mark.nii2dcmseg
def test_3_5_check_end2end_multiclass_multilayer_converter(
site_package_path, converter_dcmseg
):
dir_dcms = os.path.join(
site_package_path, "pydicom/data/test_files/dicomdirtests/98892001/CT5N/*"
)
path_dcms = [path for path in glob.glob(dir_dcms) if ".json" not in path]
path_mapping = "tests/test_data/sample_segmentation/mapping.json"
path_seg_nifti = "tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
dcmsegs = converter_dcmseg.multiclass_converter(
path_seg_nifti, path_mapping, path_dcms, multiLayer=True
)
assert len(dcmsegs) == 1
for dcmseg in dcmsegs:
assert os.path.exists(dcmseg)
os.remove(dcmseg)
@pytest.mark.nii2dcmseg
def test_3_6_check_check_all_labels(converter_dcmseg):
mapping = converter_dcmseg._load_segmap(
"tests/test_data/sample_segmentation/mapping.json"
)
fake_mapping = converter_dcmseg._load_segmap(
"tests/test_data/sample_segmentation/fake_mapping.json"
)
seg = nib.load(
"tests/test_data/sample_segmentation/CT5N_segmentation.nii.gz"
).get_fdata()
with pytest.raises(ValueError):
converter_dcmseg._check_all_lables(fake_mapping, seg)
converter_dcmseg._check_all_lables(mapping, seg) | en | 0.681791 | # load the correct mapping # no valid seg path # nonstd json # when the number of dicoms is a mismatch # removing the first dicom path from the list | 2.140682 | 2 |
dvc/utils/http.py | Abrosimov-a-a/dvc | 0 | 6632087 | import io
from contextlib import contextmanager
from dvc.utils.compat import FileNotFoundError
@contextmanager
def open_url(url, mode="r", encoding=None):
"""Opens an url as a readable stream.
Resumes on connection error.
Url could be a string or a callable returning a string.
"""
assert mode in {"r", "rt", "rb"}
with iter_url(url) as (response, it):
bytes_stream = IterStream(it)
if mode == "rb":
yield bytes_stream
else:
encoding = encoding or response.encoding
yield io.TextIOWrapper(bytes_stream, encoding=encoding)
@contextmanager
def iter_url(url, chunk_size=io.DEFAULT_BUFFER_SIZE):
"""Iterate over chunks requested from url."""
import requests
def request(headers=None):
the_url = url() if callable(url) else url
response = requests.get(the_url, stream=True, headers=headers)
if response.status_code == 404:
raise FileNotFoundError("Can't open {}".format(the_url))
response.raise_for_status()
return response
def gen(response):
try:
pos = 0
while True:
try:
for chunk in response.iter_content(chunk_size):
pos += len(chunk)
yield chunk
break
except requests.ConnectionError:
response.close()
if response.headers.get("Accept-Ranges") != "bytes":
raise
# Reopen request from where we stopped
headers = {"Range": "bytes={}-".format(pos)}
response = request(headers)
finally:
response.close()
response = request()
it = gen(response)
try:
yield response, it
finally:
# Ensure connection is closed
it.close()
class IterStream(io.RawIOBase):
"""Wraps an iterator yielding bytes as a file object"""
def __init__(self, iterator):
self.iterator = iterator
self.leftover = None
def readable(self):
return True
# Python 3 requires only .readinto() method, it still uses other ones
# under some circumstances and falls back if those are absent. Since
# iterator already constructs byte strings for us, .readinto() is not the
# most optimal, so we provide .read1() too.
def readinto(self, b):
try:
n = len(b) # We're supposed to return at most this much
chunk = self.leftover or next(self.iterator)
output, self.leftover = chunk[:n], chunk[n:]
n_out = len(output)
b[:n_out] = output
return n_out
except StopIteration:
return 0 # indicate EOF
readinto1 = readinto
def read1(self, n=-1):
try:
chunk = self.leftover or next(self.iterator)
except StopIteration:
return b""
        # Return an arbitrary number of bytes
if n <= 0:
self.leftover = None
return chunk
output, self.leftover = chunk[:n], chunk[n:]
return output
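# --- Hypothetical usage sketch (not part of dvc). ---
# open_url() yields a file-like object that transparently resumes the request on
# connection errors when the server honours HTTP Range headers. The URL below is
# a placeholder assumption.
def _example_read_first_line(url="https://example.com/data.csv"):
    with open_url(url, mode="r") as stream:
        return stream.readline()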
| import io
from contextlib import contextmanager
from dvc.utils.compat import FileNotFoundError
@contextmanager
def open_url(url, mode="r", encoding=None):
"""Opens an url as a readable stream.
Resumes on connection error.
Url could be a string or a callable returning a string.
"""
assert mode in {"r", "rt", "rb"}
with iter_url(url) as (response, it):
bytes_stream = IterStream(it)
if mode == "rb":
yield bytes_stream
else:
encoding = encoding or response.encoding
yield io.TextIOWrapper(bytes_stream, encoding=encoding)
@contextmanager
def iter_url(url, chunk_size=io.DEFAULT_BUFFER_SIZE):
"""Iterate over chunks requested from url."""
import requests
def request(headers=None):
the_url = url() if callable(url) else url
response = requests.get(the_url, stream=True, headers=headers)
if response.status_code == 404:
raise FileNotFoundError("Can't open {}".format(the_url))
response.raise_for_status()
return response
def gen(response):
try:
pos = 0
while True:
try:
for chunk in response.iter_content(chunk_size):
pos += len(chunk)
yield chunk
break
except requests.ConnectionError:
response.close()
if response.headers.get("Accept-Ranges") != "bytes":
raise
# Reopen request from where we stopped
headers = {"Range": "bytes={}-".format(pos)}
response = request(headers)
finally:
response.close()
response = request()
it = gen(response)
try:
yield response, it
finally:
# Ensure connection is closed
it.close()
class IterStream(io.RawIOBase):
"""Wraps an iterator yielding bytes as a file object"""
def __init__(self, iterator):
self.iterator = iterator
self.leftover = None
def readable(self):
return True
# Python 3 requires only .readinto() method, it still uses other ones
# under some circumstances and falls back if those are absent. Since
# iterator already constructs byte strings for us, .readinto() is not the
# most optimal, so we provide .read1() too.
def readinto(self, b):
try:
n = len(b) # We're supposed to return at most this much
chunk = self.leftover or next(self.iterator)
output, self.leftover = chunk[:n], chunk[n:]
n_out = len(output)
b[:n_out] = output
return n_out
except StopIteration:
return 0 # indicate EOF
readinto1 = readinto
def read1(self, n=-1):
try:
chunk = self.leftover or next(self.iterator)
except StopIteration:
return b""
        # Return an arbitrary number of bytes
if n <= 0:
self.leftover = None
return chunk
output, self.leftover = chunk[:n], chunk[n:]
return output
| en | 0.900914 | Opens an url as a readable stream. Resumes on connection error. Url could be a string or a callable returning a string. Iterate over chunks requested from url. # Reopen request from where we stopped # Ensure connection is closed Wraps an iterator yielding bytes as a file object # Python 3 requires only .readinto() method, it still uses other ones # under some circumstances and falls back if those are absent. Since # iterator already constructs byte strings for us, .readinto() is not the # most optimal, so we provide .read1() too. # We're supposed to return at most this much # indicate EOF # Return an arbitrary number or bytes | 2.962671 | 3 |
test/helper/output/message_test.py | jakob-bagterp/timer-for-python | 2 | 6632088 | from _helper.random import random_decimals, random_thread_name
from _helper.time_fractions import (random_days_as_ns, random_hours_as_ns,
random_microseconds_as_ns,
random_milliseconds_as_ns,
random_minutes_as_ns,
random_nanoseconds_as_ns,
random_seconds_as_ns)
from colorist import Color
from timer.constant.various import NONE_VALUE
from timer.helper.output import message
from timer.model.elapsed_time_fractions import ElapsedTimeFractions
from timer.model.time_fractions import TimeFractions
def process_terminal_message(elapsed_time_ns: int, capfd: object, max_decimals: int | None = None, has_thread: bool = False) -> tuple[str, TimeFractions, ElapsedTimeFractions, int, str]:
fractions = TimeFractions(elapsed_time_ns)
time = fractions.time
decimals = random_decimals() if max_decimals is None else random_decimals(max_decimals)
thread = NONE_VALUE if not has_thread else random_thread_name()
message(thread, fractions, decimals)
terminal_output, _ = capfd.readouterr()
return terminal_output, fractions, time, decimals, thread
def test_output_message_days(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_days_as_ns(allow_zero=False)
terminal_output, fractions, time, _, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {time.days}d {time.hours}h {time.minutes}m {fractions.seconds_rounded()}s\n"
def test_output_message_hours(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_hours_as_ns(allow_zero=False)
terminal_output, fractions, time, _, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {time.hours}h {time.minutes}m {fractions.seconds_rounded()}s\n"
def test_output_message_minutes(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_minutes_as_ns(allow_zero=False)
terminal_output, fractions, time, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_minutes_to_seconds():.{decimals}f} seconds ({time.minutes}m {fractions.seconds_rounded()}s)\n"
def test_output_message_seconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_seconds_as_ns(allow_zero=False)
terminal_output, fractions, _, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_seconds_to_float():.{decimals}f} seconds\n"
def test_output_message_milliseconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_milliseconds_as_ns(allow_zero=False)
terminal_output, fractions, _, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_milliseconds_to_float():.{decimals}f} milliseconds\n"
def test_output_message_microseconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_microseconds_as_ns(allow_zero=False)
terminal_output, fractions, _, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_microseconds_to_float():.{decimals}f} microseconds\n"
def test_output_message_nanoseconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_nanoseconds_as_ns(allow_zero=False)
terminal_output, _, time, _, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {time.nanoseconds} nanoseconds\n"
def test_output_message_hours_with_custom_thread(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_hours_as_ns(allow_zero=False)
terminal_output, fractions, time, _, thread = process_terminal_message(
mock_elapsed_time_ns, capfd, has_thread=True)
assert terminal_output == f"Elapsed time (thread {Color.GREEN}{thread}{Color.OFF}): {time.hours}h {time.minutes}m {fractions.seconds_rounded()}s\n"
| from _helper.random import random_decimals, random_thread_name
from _helper.time_fractions import (random_days_as_ns, random_hours_as_ns,
random_microseconds_as_ns,
random_milliseconds_as_ns,
random_minutes_as_ns,
random_nanoseconds_as_ns,
random_seconds_as_ns)
from colorist import Color
from timer.constant.various import NONE_VALUE
from timer.helper.output import message
from timer.model.elapsed_time_fractions import ElapsedTimeFractions
from timer.model.time_fractions import TimeFractions
def process_terminal_message(elapsed_time_ns: int, capfd: object, max_decimals: int | None = None, has_thread: bool = False) -> tuple[str, TimeFractions, ElapsedTimeFractions, int, str]:
fractions = TimeFractions(elapsed_time_ns)
time = fractions.time
decimals = random_decimals() if max_decimals is None else random_decimals(max_decimals)
thread = NONE_VALUE if not has_thread else random_thread_name()
message(thread, fractions, decimals)
terminal_output, _ = capfd.readouterr()
return terminal_output, fractions, time, decimals, thread
def test_output_message_days(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_days_as_ns(allow_zero=False)
terminal_output, fractions, time, _, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {time.days}d {time.hours}h {time.minutes}m {fractions.seconds_rounded()}s\n"
def test_output_message_hours(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_hours_as_ns(allow_zero=False)
terminal_output, fractions, time, _, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {time.hours}h {time.minutes}m {fractions.seconds_rounded()}s\n"
def test_output_message_minutes(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_minutes_as_ns(allow_zero=False)
terminal_output, fractions, time, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_minutes_to_seconds():.{decimals}f} seconds ({time.minutes}m {fractions.seconds_rounded()}s)\n"
def test_output_message_seconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_seconds_as_ns(allow_zero=False)
terminal_output, fractions, _, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_seconds_to_float():.{decimals}f} seconds\n"
def test_output_message_milliseconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_milliseconds_as_ns(allow_zero=False)
terminal_output, fractions, _, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_milliseconds_to_float():.{decimals}f} milliseconds\n"
def test_output_message_microseconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_microseconds_as_ns(allow_zero=False)
terminal_output, fractions, _, decimals, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {fractions.count_microseconds_to_float():.{decimals}f} microseconds\n"
def test_output_message_nanoseconds(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_nanoseconds_as_ns(allow_zero=False)
terminal_output, _, time, _, _ = process_terminal_message(mock_elapsed_time_ns, capfd)
assert terminal_output == f"Elapsed time: {time.nanoseconds} nanoseconds\n"
def test_output_message_hours_with_custom_thread(capfd: object) -> None:
for _ in range(100):
mock_elapsed_time_ns = random_hours_as_ns(allow_zero=False)
terminal_output, fractions, time, _, thread = process_terminal_message(
mock_elapsed_time_ns, capfd, has_thread=True)
assert terminal_output == f"Elapsed time (thread {Color.GREEN}{thread}{Color.OFF}): {time.hours}h {time.minutes}m {fractions.seconds_rounded()}s\n"
| none | 1 | 2.345821 | 2 |
|
dit/multivariate/common_informations/base_markov_optimizer.py | leoalfonso/dit | 1 | 6632089 | <filename>dit/multivariate/common_informations/base_markov_optimizer.py
"""
Abstract base classes
"""
from __future__ import division
from abc import abstractmethod
import numpy as np
from ...algorithms import BaseAuxVarOptimizer
from ...utils import unitful
from ..dual_total_correlation import dual_total_correlation
from ..entropy import entropy
class MarkovVarOptimizer(BaseAuxVarOptimizer):
"""
Abstract base class for constructing auxiliary variables which render a set
of variables conditionally independent.
"""
name = ""
description = ""
def __init__(self, dist, rvs=None, crvs=None, bound=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the auxiliary Markov variable, W, for.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables to render conditionally independent. If None, then all
random variables are used, which is equivalent to passing
`rvs=dist.rvs`.
crvs : list, None
A single list of indexes specifying the random variables to
condition on. If None, then no variables are conditioned on.
bound : int
Place an artificial bound on the size of W.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', then the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
super(MarkovVarOptimizer, self).__init__(dist, rvs=rvs, crvs=crvs, rv_mode=rv_mode)
theoretical_bound = self.compute_bound()
bound = min(bound, theoretical_bound) if bound else theoretical_bound
rv_bounds = self._shape[1:-1]
self._pmf_to_match = self._pmf.copy()
# remove the rvs other than the first, they need to be generated by W
# in order to satisfy the markov criteria:
self._pmf = self._pmf.sum(axis=tuple(range(1, len(self._shape)-1)))
self._shape = self._pmf.shape
self._all_vars = {0, 1}
self._full_pmf = self._full_pmf.sum(axis=tuple(range(self._n + 1, len(self._full_shape)-1)))
self._full_shape = self._full_pmf.shape
self._full_vars = tuple(range(self._n + 2))
        # back up where the rvs and crvs are; they need to reflect
# the above removals for the sake of adding auxvars:
self.__rvs, self._rvs = self._rvs, {0}
self.__crvs, self._crvs = self._crvs, {1}
self._construct_auxvars([({0, 1}, bound)] +
[({1, 2}, s) for s in rv_bounds])
# put rvs, crvs back:
self._rvs = self.__rvs
self._crvs = self.__crvs
del self.__rvs
del self.__crvs
self._W = {1 + len(self._aux_vars)}
# The constraint that the joint doesn't change.
self.constraints += [{'type': 'eq',
'fun': self.constraint_match_joint,
},
]
self._default_hops = 5
self._additional_options = {'options': {'maxiter': 1000,
'ftol': 1e-6,
'eps': 1.4901161193847656e-9,
}
}
@abstractmethod
def compute_bound(self):
"""
Return a bound on the cardinality of the auxiliary variable.
Returns
-------
bound : int
The bound on the size of W.
"""
pass
def construct_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
joint = super(MarkovVarOptimizer, self).construct_joint(x)
joint = np.moveaxis(joint, 1, -1) # move crvs
joint = np.moveaxis(joint, 1, -1) # move W
return joint
def construct_full_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
joint = super(MarkovVarOptimizer, self).construct_full_joint(x)
joint = np.moveaxis(joint, self._n + 1, -1) # move crvs
joint = np.moveaxis(joint, self._n + 1, -1) # move W
return joint
def constraint_match_joint(self, x):
"""
Ensure that the joint distribution represented by the optimization
vector matches that of the distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
"""
joint = self.construct_joint(x)
joint = joint.sum(axis=-1) # marginalize out w
delta = (100*(joint - self._pmf_to_match)**2).sum()
return delta
@classmethod
def functional(cls):
"""
Construct a functional form of the optimizer.
"""
@unitful
def common_info(dist, rvs=None, crvs=None, niter=None, maxiter=1000, polish=1e-6, bound=None, rv_mode=None):
dtc = dual_total_correlation(dist, rvs, crvs, rv_mode)
ent = entropy(dist, rvs, crvs, rv_mode)
if np.isclose(dtc, ent):
# Common informations are bound between the dual total correlation and the joint
# entropy. Therefore, if the two are equal, the common information is equal to them
# as well.
return dtc
ci = cls(dist, rvs, crvs, bound, rv_mode)
ci.optimize(niter=niter, maxiter=maxiter, polish=polish)
return ci.objective(ci._optima)
common_info.__doc__ = \
"""
Computes the {name} common information, {description}.
Parameters
----------
dist : Distribution
The distribution for which the {name} common information will be
computed.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the {name} common information. If None,
                then it is calculated over all random variables, which is equivalent to
passing `rvs=dist.rvs`.
crvs : list, None
A single list of indexes specifying the random variables to condition
on. If None, then no variables are conditioned on.
niter : int > 0
Number of basin hoppings to perform during the optimization.
maxiter : int > 0
The number of iterations of the optimization subroutine to perform.
polish : False, float
Whether to polish the result or not. If a float, this will perform a
second optimization seeded with the result of the first, but with
smaller tolerances and probabilities below polish set to 0. If
False, don't polish.
bound : int
Bound the size of the Markov variable.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{{'indices', 'names'}}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
                to 'names', then the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
ci : float
The {name} common information.
""".format(name=cls.name, description=cls.description)
return common_info
class MinimizingMarkovVarOptimizer(MarkovVarOptimizer): # pragma: no cover
"""
Abstract base class for an optimizer which additionally minimizes the size
of the auxiliary variable.
"""
def optimize(self, x0=None, niter=None, maxiter=None, polish=1e-6, callback=False, minimize=True, min_niter=15):
"""
Parameters
----------
x0 : np.ndarray, None
The vector to initialize the optimization with. If None, a random
vector is used.
niter : int
The number of times to basin hop in the optimization.
maxiter : int
The number of inner optimizer steps to perform.
polish : False, float
Whether to polish the result or not. If a float, this will perform a
second optimization seeded with the result of the first, but with
smaller tolerances and probabilities below polish set to 0. If
False, don't polish.
callback : bool
Whether to utilize a callback or not.
minimize : bool
Whether to minimize the auxiliary variable or not.
min_niter : int
The number of basin hops to make during the minimization of the common variable.
"""
# call the normal optimizer
super(MinimizingMarkovVarOptimizer, self).optimize(x0=x0,
niter=niter,
maxiter=maxiter,
polish=False,
callback=callback)
if minimize:
# minimize the entropy of W
self._post_process(style='entropy', minmax='min', niter=min_niter, maxiter=maxiter)
if polish:
self._polish(cutoff=polish)
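# --- Hypothetical sketch (not part of dit): how a concrete optimizer is built. ---
# A subclass only has to provide compute_bound(); functional() then wraps it in a
# plain function over Distributions. The bound below is a deliberately simple
# stand-in for illustration, not a derived cardinality bound.
class _IllustrativeCommonInformation(MarkovVarOptimizer):
    name = "illustrative"
    description = "an illustrative common information"
    def compute_bound(self):
        bound = 1
        for size in self._shape[:-1]:  # product of the rv alphabet sizes
            bound *= size
        return bound
_illustrative_common_information = _IllustrativeCommonInformation.functional()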
| <filename>dit/multivariate/common_informations/base_markov_optimizer.py
"""
Abstract base classes
"""
from __future__ import division
from abc import abstractmethod
import numpy as np
from ...algorithms import BaseAuxVarOptimizer
from ...utils import unitful
from ..dual_total_correlation import dual_total_correlation
from ..entropy import entropy
class MarkovVarOptimizer(BaseAuxVarOptimizer):
"""
Abstract base class for constructing auxiliary variables which render a set
of variables conditionally independent.
"""
name = ""
description = ""
def __init__(self, dist, rvs=None, crvs=None, bound=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to compute the auxiliary Markov variable, W, for.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables to render conditionally independent. If None, then all
random variables are used, which is equivalent to passing
`rvs=dist.rvs`.
crvs : list, None
A single list of indexes specifying the random variables to
condition on. If None, then no variables are conditioned on.
bound : int
Place an artificial bound on the size of W.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', then the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
super(MarkovVarOptimizer, self).__init__(dist, rvs=rvs, crvs=crvs, rv_mode=rv_mode)
theoretical_bound = self.compute_bound()
bound = min(bound, theoretical_bound) if bound else theoretical_bound
rv_bounds = self._shape[1:-1]
self._pmf_to_match = self._pmf.copy()
# remove the rvs other than the first, they need to be generated by W
# in order to satisfy the markov criteria:
self._pmf = self._pmf.sum(axis=tuple(range(1, len(self._shape)-1)))
self._shape = self._pmf.shape
self._all_vars = {0, 1}
self._full_pmf = self._full_pmf.sum(axis=tuple(range(self._n + 1, len(self._full_shape)-1)))
self._full_shape = self._full_pmf.shape
self._full_vars = tuple(range(self._n + 2))
        # back up where the rvs and crvs are; they need to reflect
# the above removals for the sake of adding auxvars:
self.__rvs, self._rvs = self._rvs, {0}
self.__crvs, self._crvs = self._crvs, {1}
self._construct_auxvars([({0, 1}, bound)] +
[({1, 2}, s) for s in rv_bounds])
# put rvs, crvs back:
self._rvs = self.__rvs
self._crvs = self.__crvs
del self.__rvs
del self.__crvs
self._W = {1 + len(self._aux_vars)}
# The constraint that the joint doesn't change.
self.constraints += [{'type': 'eq',
'fun': self.constraint_match_joint,
},
]
self._default_hops = 5
self._additional_options = {'options': {'maxiter': 1000,
'ftol': 1e-6,
'eps': 1.4901161193847656e-9,
}
}
@abstractmethod
def compute_bound(self):
"""
Return a bound on the cardinality of the auxiliary variable.
Returns
-------
bound : int
The bound on the size of W.
"""
pass
def construct_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
joint = super(MarkovVarOptimizer, self).construct_joint(x)
joint = np.moveaxis(joint, 1, -1) # move crvs
joint = np.moveaxis(joint, 1, -1) # move W
return joint
def construct_full_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
joint = super(MarkovVarOptimizer, self).construct_full_joint(x)
joint = np.moveaxis(joint, self._n + 1, -1) # move crvs
joint = np.moveaxis(joint, self._n + 1, -1) # move W
return joint
def constraint_match_joint(self, x):
"""
Ensure that the joint distribution represented by the optimization
vector matches that of the distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
"""
joint = self.construct_joint(x)
joint = joint.sum(axis=-1) # marginalize out w
delta = (100*(joint - self._pmf_to_match)**2).sum()
return delta
@classmethod
def functional(cls):
"""
Construct a functional form of the optimizer.
"""
@unitful
def common_info(dist, rvs=None, crvs=None, niter=None, maxiter=1000, polish=1e-6, bound=None, rv_mode=None):
dtc = dual_total_correlation(dist, rvs, crvs, rv_mode)
ent = entropy(dist, rvs, crvs, rv_mode)
if np.isclose(dtc, ent):
# Common informations are bound between the dual total correlation and the joint
# entropy. Therefore, if the two are equal, the common information is equal to them
# as well.
return dtc
ci = cls(dist, rvs, crvs, bound, rv_mode)
ci.optimize(niter=niter, maxiter=maxiter, polish=polish)
return ci.objective(ci._optima)
common_info.__doc__ = \
"""
Computes the {name} common information, {description}.
Parameters
----------
dist : Distribution
The distribution for which the {name} common information will be
computed.
rvs : list, None
A list of lists. Each inner list specifies the indexes of the random
variables used to calculate the {name} common information. If None,
                then it is calculated over all random variables, which is equivalent to
passing `rvs=dist.rvs`.
crvs : list, None
A single list of indexes specifying the random variables to condition
on. If None, then no variables are conditioned on.
niter : int > 0
Number of basin hoppings to perform during the optimization.
maxiter : int > 0
The number of iterations of the optimization subroutine to perform.
polish : False, float
Whether to polish the result or not. If a float, this will perform a
second optimization seeded with the result of the first, but with
smaller tolerances and probabilities below polish set to 0. If
False, don't polish.
bound : int
Bound the size of the Markov variable.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{{'indices', 'names'}}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
                to 'names', then the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
ci : float
The {name} common information.
""".format(name=cls.name, description=cls.description)
return common_info
class MinimizingMarkovVarOptimizer(MarkovVarOptimizer): # pragma: no cover
"""
Abstract base class for an optimizer which additionally minimizes the size
of the auxiliary variable.
"""
def optimize(self, x0=None, niter=None, maxiter=None, polish=1e-6, callback=False, minimize=True, min_niter=15):
"""
Parameters
----------
x0 : np.ndarray, None
The vector to initialize the optimization with. If None, a random
vector is used.
niter : int
The number of times to basin hop in the optimization.
maxiter : int
The number of inner optimizer steps to perform.
polish : False, float
Whether to polish the result or not. If a float, this will perform a
second optimization seeded with the result of the first, but with
smaller tolerances and probabilities below polish set to 0. If
False, don't polish.
callback : bool
Whether to utilize a callback or not.
minimize : bool
Whether to minimize the auxiliary variable or not.
min_niter : int
The number of basin hops to make during the minimization of the common variable.
"""
# call the normal optimizer
super(MinimizingMarkovVarOptimizer, self).optimize(x0=x0,
niter=niter,
maxiter=maxiter,
polish=False,
callback=callback)
if minimize:
# minimize the entropy of W
self._post_process(style='entropy', minmax='min', niter=min_niter, maxiter=maxiter)
if polish:
self._polish(cutoff=polish)
| en | 0.770997 | Abstract base classes Abstract base class for constructing auxiliary variables which render a set of variables conditionally independent. Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the auxiliary Markov variable, W, for. rvs : list, None A list of lists. Each inner list specifies the indexes of the random variables to render conditionally independent. If None, then all random variables are used, which is equivalent to passing `rvs=dist.rvs`. crvs : list, None A single list of indexes specifying the random variables to condition on. If None, then no variables are conditioned on. bound : int Place an artificial bound on the size of W. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', the the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted, which defaults to 'indices'. # remove the rvs other than the first, they need to be generated by W # in order to satisfy the markov criteria: # back up where the rvs and crvs are, they need to be reflect # the above removals for the sake of adding auxvars: # put rvs, crvs back: # The constraint that the joint doesn't change. Return a bound on the cardinality of the auxiliary variable. Returns ------- bound : int The bound on the size of W. Construct the joint distribution. Parameters ---------- x : np.ndarray An optimization vector. Returns ------- joint : np.ndarray The joint distribution resulting from the distribution passed in and the optimization vector. # move crvs # move W Construct the joint distribution. Parameters ---------- x : np.ndarray An optimization vector. Returns ------- joint : np.ndarray The joint distribution resulting from the distribution passed in and the optimization vector. # move crvs # move W Ensure that the joint distribution represented by the optimization vector matches that of the distribution. Parameters ---------- x : np.ndarray An optimization vector. # marginalize out w Construct a functional form of the optimizer. # Common informations are bound between the dual total correlation and the joint # entropy. Therefore, if the two are equal, the common information is equal to them # as well. Computes the {name} common information, {description}. Parameters ---------- dist : Distribution The distribution for which the {name} common information will be computed. rvs : list, None A list of lists. Each inner list specifies the indexes of the random variables used to calculate the {name} common information. If None, then it calculated over all random variables, which is equivalent to passing `rvs=dist.rvs`. crvs : list, None A single list of indexes specifying the random variables to condition on. If None, then no variables are conditioned on. niter : int > 0 Number of basin hoppings to perform during the optimization. maxiter : int > 0 The number of iterations of the optimization subroutine to perform. polish : False, float Whether to polish the result or not. If a float, this will perform a second optimization seeded with the result of the first, but with smaller tolerances and probabilities below polish set to 0. If False, don't polish. bound : int Bound the size of the Markov variable. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {{'indices', 'names'}}. 
If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', the the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted, which defaults to 'indices'. Returns ------- ci : float The {name} common information. # pragma: no cover Abstract base class for an optimizer which additionally minimizes the size of the auxiliary variable. Parameters ---------- x0 : np.ndarray, None The vector to initialize the optimization with. If None, a random vector is used. niter : int The number of times to basin hop in the optimization. maxiter : int The number of inner optimizer steps to perform. polish : False, float Whether to polish the result or not. If a float, this will perform a second optimization seeded with the result of the first, but with smaller tolerances and probabilities below polish set to 0. If False, don't polish. callback : bool Whether to utilize a callback or not. minimize : bool Whether to minimize the auxiliary variable or not. min_niter : int The number of basin hops to make during the minimization of the common variable. # call the normal optimizer # minimize the entropy of W | 2.559445 | 3 |
sashimmi/subcommands/bind.py | chpatton013/sashimmi | 0 | 6632090 | <filename>sashimmi/subcommands/bind.py
from .subcommand import SubcommandBaseWithWorkspaceWriteLock, register_subcommand
from ..models.shim import read_shims_node, bind_shims
class BindSubcommand(SubcommandBaseWithWorkspaceWriteLock):
def name(self):
return "bind"
def help(self):
return "Bind all shims."
def configure_subparser(self, subparser):
subparser.add_argument(
"--multi",
action="store_true",
default=False,
help="Bind shims in multi-namespace."
)
def run_with_lock(self, args, workspace, lock):
shims = read_shims_node(workspace.root)
bind_shims(
workspace.root, shims,
self.make_multi_lock() if args.multi else None
)
register_subcommand(BindSubcommand())
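# --- Hypothetical sketch (not part of sashimmi): a second subcommand following
# the same pattern as BindSubcommand above; its name and behaviour are assumptions.
class RebindSubcommand(SubcommandBaseWithWorkspaceWriteLock):
    def name(self):
        return "rebind"
    def help(self):
        return "Re-bind all shims in the default namespace (illustrative)."
    def configure_subparser(self, subparser):
        pass  # no extra flags in this sketch
    def run_with_lock(self, args, workspace, lock):
        shims = read_shims_node(workspace.root)
        bind_shims(workspace.root, shims, None)
# register_subcommand(RebindSubcommand())  # would register it alongside "bind"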
| <filename>sashimmi/subcommands/bind.py
from .subcommand import SubcommandBaseWithWorkspaceWriteLock, register_subcommand
from ..models.shim import read_shims_node, bind_shims
class BindSubcommand(SubcommandBaseWithWorkspaceWriteLock):
def name(self):
return "bind"
def help(self):
return "Bind all shims."
def configure_subparser(self, subparser):
subparser.add_argument(
"--multi",
action="store_true",
default=False,
help="Bind shims in multi-namespace."
)
def run_with_lock(self, args, workspace, lock):
shims = read_shims_node(workspace.root)
bind_shims(
workspace.root, shims,
self.make_multi_lock() if args.multi else None
)
register_subcommand(BindSubcommand())
| none | 1 | 2.310998 | 2 |
|
ucbgradedists/ucbgradedists/settings/base.py | dailycal-projects/grades | 5 | 6632091 | import os
import dj_database_url
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
# Number of digits to use in rounding mean and standard deviation
PRECISION = 3
def env_var(var_name):
"""Get the environment variable VAR_NAME, or raise an Exception."""
try:
return os.environ[var_name]
except KeyError:
msg = "You need to set the {} environment variable.".format(var_name)
raise ImproperlyConfigured(msg)
# Basic settings
SECRET_KEY = env_var('DJANGO_SECRET_KEY')
DEBUG = False
ALLOWED_HOSTS = []
ADMINS = (
(env_var('ADMIN_NAME'), env_var('ADMIN_EMAIL')),
)
# Email settings, assuming you use gmail.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
SERVER_EMAIL = ''
# Set up directory structure and static files
BASE_DIR = Path(__file__).ancestor(3)
DATA_DIR = BASE_DIR.ancestor(1).child("data")
MEDIA_ROOT = BASE_DIR.child("media")
STATIC_ROOT = BASE_DIR.child("staticfiles")
STATIC_URL = '/grades/static/'
STATICFILES_DIRS = (
BASE_DIR.child("static"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'mathfilters',
'sass_processor',
'django_extensions',
'bakery',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ucbgradedists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.child("templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ucbgradedists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = { 'default': dj_database_url.config() }
DATABASES['default']['ENGINE'] = 'django_postgrespool'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Bakery
BUILD_DIR = BASE_DIR.child("build")
BAKERY_VIEWS = (
'graphic.views.GraphicView',
)
AWS_BUCKET_NAME = env_var('AWS_BUCKET_NAME')
AWS_ACCESS_KEY_ID = env_var('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env_var('AWS_SECRET_ACCESS_KEY')
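# --- Hypothetical sketch (not part of the settings module). ---
# These settings expect the following environment variables before Django loads
# this file; dj_database_url.config() additionally reads DATABASE_URL by default.
# Example (shell, placeholder values):
#   export DJANGO_SECRET_KEY=change-me
#   export ADMIN_NAME="Site Admin" ADMIN_EMAIL=admin@example.com
#   export DATABASE_URL=postgres://user:pass@localhost:5432/grades
#   export AWS_BUCKET_NAME=... AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=...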
| import os
import dj_database_url
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
# Number of digits to use in rounding mean and standard deviation
PRECISION = 3
def env_var(var_name):
"""Get the environment variable VAR_NAME, or raise an Exception."""
try:
return os.environ[var_name]
except KeyError:
msg = "You need to set the {} environment variable.".format(var_name)
raise ImproperlyConfigured(msg)
# Basic settings
SECRET_KEY = env_var('DJANGO_SECRET_KEY')
DEBUG = False
ALLOWED_HOSTS = []
ADMINS = (
(env_var('ADMIN_NAME'), env_var('ADMIN_EMAIL')),
)
# Email settings, assuming you use gmail.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
SERVER_EMAIL = ''
# Set up directory structure and static files
BASE_DIR = Path(__file__).ancestor(3)
DATA_DIR = BASE_DIR.ancestor(1).child("data")
MEDIA_ROOT = BASE_DIR.child("media")
STATIC_ROOT = BASE_DIR.child("staticfiles")
STATIC_URL = '/grades/static/'
STATICFILES_DIRS = (
BASE_DIR.child("static"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'mathfilters',
'sass_processor',
'django_extensions',
'bakery',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ucbgradedists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.child("templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ucbgradedists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = { 'default': dj_database_url.config() }
DATABASES['default']['ENGINE'] = 'django_postgrespool'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Bakery
BUILD_DIR = BASE_DIR.child("build")
BAKERY_VIEWS = (
'graphic.views.GraphicView',
)
AWS_BUCKET_NAME = env_var('AWS_BUCKET_NAME')
AWS_ACCESS_KEY_ID = env_var('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env_var('AWS_SECRET_ACCESS_KEY')
| en | 0.612886 | # Number of digits to use in rounding mean and standard deviation Get the environment variable VAR_NAME, or raise an Exception. # Basic settings # Email settings, assuming you use gmail. # Set up directory structure and static files # Application definition # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ # Bakery | 2.176843 | 2 |
py101/static_meth_att.py | fernandozamoraj/py_sandbox | 0 | 6632092 | class Student:
_staticval = -1
@classmethod
    def myclassmethod(x):  # 'x' receives the class object (conventionally named 'cls')
print(x._staticval)
@staticmethod
def mystaticmethod():
print(Student._staticval)
def my_method(self):
print(Student._staticval)
Student.myclassmethod()   # classmethod: the class is passed implicitly
Student.mystaticmethod()  # staticmethod: no implicit first argument
# Student.my_method()     # calling a plain method on the class raises TypeError (no instance)
s = Student()
s.myclassmethod()
s.mystaticmethod()
s.my_method()
| class Student:
_staticval = -1
@classmethod
def myclassmethod(x):
print(x._staticval)
@staticmethod
def mystaticmethod():
print(Student._staticval)
def my_method(self):
print(Student._staticval)
Student.myclassmethod()
Student.mystaticmethod()
# Student.my_method()  # calling a plain method on the class raises TypeError (no instance)
s = Student()
s.myclassmethod()
s.mystaticmethod()
s.my_method()
| none | 1 | 3.060092 | 3 |
|
infoqscraper/test/test_convert.py | naxhh/infoqscraper | 41 | 6632093 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import tempfile
from infoqscraper import client
from infoqscraper import convert
from infoqscraper import scrap
from infoqscraper import test
from infoqscraper.test.compat import unittest
class TestSwfConverter(unittest.TestCase):
def setUp(self):
self.iq = client.InfoQ()
        self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
@test.use_cache
def test_swf(self):
# Fetch a slide
pres = scrap.Presentation(self.iq, "Java-GC-Azul-C4")
swf_path = self.iq.download(pres.metadata['slides'][0], self.tmp_dir)
# SWF -> PNG
png_path = swf_path.replace('.swf', '.png')
convert.swf2png(swf_path, png_path)
stat_info = os.stat(png_path)
self.assertGreater(stat_info.st_size, 1000)
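# --- Hypothetical usage sketch (not part of the test suite). ---
# The same conversion can be driven outside unittest; the path handling mirrors
# the test above and needs whatever external tooling convert.swf2png relies on.
def _example_swf_to_png(swf_path):
    png_path = swf_path.replace('.swf', '.png')
    convert.swf2png(swf_path, png_path)
    return png_path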
| # -*- coding: utf-8 -*-
#
# Copyright (c) 2012, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import tempfile
from infoqscraper import client
from infoqscraper import convert
from infoqscraper import scrap
from infoqscraper import test
from infoqscraper.test.compat import unittest
class TestSwfConverter(unittest.TestCase):
def setUp(self):
self.iq = client.InfoQ()
self.tmp_dir = tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
@test.use_cache
def test_swf(self):
# Fetch a slide
pres = scrap.Presentation(self.iq, "Java-GC-Azul-C4")
swf_path = self.iq.download(pres.metadata['slides'][0], self.tmp_dir)
# SWF -> PNG
png_path = swf_path.replace('.swf', '.png')
convert.swf2png(swf_path, png_path)
stat_info = os.stat(png_path)
self.assertGreater(stat_info.st_size, 1000)
| en | 0.689788 | # -*- coding: utf-8 -*- # # Copyright (c) 2012, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Fetch a slide # SWF -> PNG | 1.662337 | 2 |
waller/waller.py | codeswhite/waller | 0 | 6632094 |
import curses
import os
import stat
import random
import sys
from pathlib import PosixPath
from subprocess import check_output, check_call, call, CalledProcessError
from typing import List, Iterator, Tuple
from .ldm_gtk import LdmGtk
def get_cmd(monitor_name: str) -> List[str]:
"""
The system command which will return path of monitor's wallpaper
* Depends on 'xfconf'
"""
return ['xfconf-query', '-c', 'xfce4-desktop', '-p',
f'/backdrop/screen0/monitor{monitor_name}/workspace0/last-image']
def img_format(image_path: PosixPath) -> (str, None):
"""
Checks the file signature (magic number)
for an image
:param image_path: The path to the image
    :return: The matching format name ('JPG', 'PNG' or 'GIF'), or None if unrecognised
"""
signatures = {'JPG': 'ffd8ff',
'PNG': '89504e',
'GIF': '474946'}
with image_path.open('rb') as img_file:
signature = img_file.read(3).hex()
for sig in signatures:
if signature == signatures[sig]:
return sig
return None
def collect_monitors() -> Iterator[str]:
"""
Collect connected monitors, via xRandr
:return: Monitor names
"""
for line in check_output('xrandr').decode().split('\n'):
if ' connected' in line:
yield line.split(' ')[0]
class Waller:
def __init__(self, win):
super().__init__()
self.win = win
# Check monitors
self.mon_id = 0
self.mons = tuple(collect_monitors())
        # Get the currently used wallpaper
current_path = self.get_current_wall()
self.current_name = current_path.name
self.current_dir = current_path.parent.resolve()
# Get 'LDM GTK greeter' wallpaper
self.ldm_bg_path = PosixPath(LdmGtk.get_bg())
# Get available
self.available = tuple(self.collect_available())
# Set permissions
self.reset_permissions()
if win is None:
return
try:
self.interactive()
except KeyboardInterrupt:
pass
# finally:
# config.save()
def interactive(self):
while 1: # Inner Loop
self.win.clear()
# Get current wallpaper
self.current_name = self.get_current_wall().name
current_id = self.get_current_id()
self.show_info(
f'({current_id + 1}/{len(self.available)}) {self.current_name}\n')
key = str(self.win.getkey()).lower()
if not key:
continue
elif key in ('x', 'q'):
return
elif key == 'm':
if len(self.mons) == 1:
continue
self.mon_id += 1
if self.mon_id >= len(self.mons):
self.mon_id = 0
continue
elif key == 'r': # Random
                current_id = random.randrange(len(self.available))  # randrange avoids randint's inclusive upper bound (IndexError)
elif key == 'key_left':
current_id -= 1
if current_id < 0:
current_id = len(self.available) - 1
elif key == 'key_right':
current_id += 1
if current_id >= len(self.available):
current_id = 0
elif key == 'l': # DM background
self.change_ldm_bg(self.available[current_id])
continue
else:
continue
# Application
self.apply(current_id)
def get_mon(self) -> str:
return self.mons[self.mon_id]
def apply(self, current_id: int) -> None:
"""
The application function
"""
path = self.current_dir / self.available[current_id]
cmd = get_cmd(self.get_mon()) + ['-s', path]
call(cmd)
def show_info(self, current: str) -> None:
if len(self.mons) > 1:
self.win.addstr('[*] Using monitor: ')
self.win.addstr(
f'{self.get_mon()}\n', curses.color_pair(3))
self.win.addstr('[+] Current wall: ')
self.win.addstr(current, curses.color_pair(3))
self.win.addstr('\n>> Controls: [<] or [>] or [R]\n' +
'[L] to set LDM GTK background\n')
if len(self.mons) > 1:
self.win.addstr('[M] to switch monitor\n',
curses.color_pair(5))
self.win.addstr('[X] or [Q] to exit\n', curses.color_pair(5))
def change_ldm_bg(self, new_bg: str) -> None:
if not LdmGtk.set_bg(self.win, self.ldm_bg_path.name, new_bg):
return
self.ldm_bg_path = self.current_dir / new_bg
self.reset_permissions()
def reset_permissions(self) -> None:
"""
        Sets proper permissions: [400] for every available image and
        [404] for the DM's background image (self.ldm_bg_path), which must
        stay world-readable so the display manager can load it
"""
for wall in self.available:
path = self.current_dir / wall
perm = stat.S_IRUSR
if path == self.ldm_bg_path:
perm |= stat.S_IROTH
os.chmod(path, perm)
def collect_available(self) -> Iterator[str]:
"""
        Collect available images in the current wallpapers directory (self.current_dir)
:return: File-names
"""
for wall in os.listdir(self.current_dir):
if img_format(self.current_dir / wall):
yield wall
def get_current_wall(self) -> PosixPath:
"""
:return: A Path to the current image of primary monitor
"""
return PosixPath(check_output(get_cmd(self.get_mon())).decode().strip())
def get_current_id(self) -> int:
"""
:return: index of current image among other images in the same directory
"""
try:
return self.available.index(self.current_name)
except ValueError:
self.win.addstr(
f'[X] Current wall "{self.current_name}" not found in {self.current_dir}\n', curses.color_pair(2))
self.win.addstr('[+] Press [R] to reset to the first image',
curses.color_pair(5))
if self.win.getkey() != 'r':
exit(1)
# Set current to first
current_id = 0
self.current_name = self.available[current_id]
self.apply(0)
return current_id
def curses_entry(win: curses.window) -> None:
# Curses initialization
curses.use_default_colors()
curses.init_pair(2, curses.COLOR_RED, -1)
curses.init_pair(3, curses.COLOR_GREEN, -1)
curses.init_pair(4, curses.COLOR_BLUE, -1)
curses.init_pair(5, curses.COLOR_YELLOW, -1)
Waller(win)
|
import curses
import os
import stat
import random
import sys
from pathlib import PosixPath
from subprocess import check_output, check_call, call, CalledProcessError
from typing import List, Iterator, Tuple
from .ldm_gtk import LdmGtk
def get_cmd(monitor_name: str) -> List[str]:
"""
The system command which will return path of monitor's wallpaper
* Depends on 'xfconf'
"""
return ['xfconf-query', '-c', 'xfce4-desktop', '-p',
f'/backdrop/screen0/monitor{monitor_name}/workspace0/last-image']
def img_format(image_path: PosixPath) -> (str, None):
"""
Checks the file signature (magic number)
for an image
:param image_path: The path to the image
:return: The detected format name ('JPG', 'PNG' or 'GIF'), or None if the signature is not recognised
"""
signatures = {'JPG': 'ffd8ff',
'PNG': '89504e',
'GIF': '474946'}
with image_path.open('rb') as img_file:
signature = img_file.read(3).hex()
for sig in signatures:
if signature == signatures[sig]:
return sig
return None
def collect_monitors() -> Iterator[str]:
"""
Collect connected monitors, via xRandr
:return: Monitor names
"""
for line in check_output('xrandr').decode().split('\n'):
if ' connected' in line:
yield line.split(' ')[0]
class Waller:
def __init__(self, win):
super().__init__()
self.win = win
# Check monitors
self.mon_id = 0
self.mons = tuple(collect_monitors())
# Get currently used wallpaper
current_path = self.get_current_wall()
self.current_name = current_path.name
self.current_dir = current_path.parent.resolve()
# Get 'LDM GTK greeter' wallpaper
self.ldm_bg_path = PosixPath(LdmGtk.get_bg())
# Get available
self.available = tuple(self.collect_available())
# Set permissions
self.reset_permissions()
if win is None:
return
try:
self.interactive()
except KeyboardInterrupt:
pass
# finally:
# config.save()
def interactive(self):
while 1: # Inner Loop
self.win.clear()
# Get current wallpaper
self.current_name = self.get_current_wall().name
current_id = self.get_current_id()
self.show_info(
f'({current_id + 1}/{len(self.available)}) {self.current_name}\n')
key = str(self.win.getkey()).lower()
if not key:
continue
elif key in ('x', 'q'):
return
elif key == 'm':
if len(self.mons) == 1:
continue
self.mon_id += 1
if self.mon_id >= len(self.mons):
self.mon_id = 0
continue
elif key == 'r': # Random
current_id = random.randint(0, len(self.available) - 1)
elif key == 'key_left':
current_id -= 1
if current_id < 0:
current_id = len(self.available) - 1
elif key == 'key_right':
current_id += 1
if current_id >= len(self.available):
current_id = 0
elif key == 'l': # DM background
self.change_ldm_bg(self.available[current_id])
continue
else:
continue
# Application
self.apply(current_id)
def get_mon(self) -> str:
return self.mons[self.mon_id]
def apply(self, current_id: int) -> None:
"""
The application function
"""
path = self.current_dir / self.available[current_id]
cmd = get_cmd(self.get_mon()) + ['-s', path]
call(cmd)
def show_info(self, current: str) -> None:
if len(self.mons) > 1:
self.win.addstr('[*] Using monitor: ')
self.win.addstr(
f'{self.get_mon()}\n', curses.color_pair(3))
self.win.addstr('[+] Current wall: ')
self.win.addstr(current, curses.color_pair(3))
self.win.addstr('\n>> Controls: [<] or [>] or [R]\n' +
'[L] to set LDM GTK background\n')
if len(self.mons) > 1:
self.win.addstr('[M] to switch monitor\n',
curses.color_pair(5))
self.win.addstr('[X] or [Q] to exit\n', curses.color_pair(5))
def change_ldm_bg(self, new_bg: str) -> None:
if not LdmGtk.set_bg(self.win, self.ldm_bg_path.name, new_bg):
return
self.ldm_bg_path = self.current_dir / new_bg
self.reset_permissions()
def reset_permissions(self) -> None:
"""
Sets proper permissions [400] for all the images
and [404] for the DM background image
"""
for wall in self.available:
path = self.current_dir / wall
perm = stat.S_IRUSR
if path == self.ldm_bg_path:
perm |= stat.S_IROTH
os.chmod(path, perm)
def collect_available(self) -> Iterator[str]:
"""
Collect available images from the current wallpapers directory (self.current_dir)
:return: File-names
"""
for wall in os.listdir(self.current_dir):
if img_format(self.current_dir / wall):
yield wall
def get_current_wall(self) -> PosixPath:
"""
:return: A Path to the current image of primary monitor
"""
return PosixPath(check_output(get_cmd(self.get_mon())).decode().strip())
def get_current_id(self) -> int:
"""
:return: index of current image among other images in the same directory
"""
try:
return self.available.index(self.current_name)
except ValueError:
self.win.addstr(
f'[X] Current wall "{self.current_name}" not found in {self.current_dir}\n', curses.color_pair(2))
self.win.addstr('[+] Press [R] to reset to the first image',
curses.color_pair(5))
if self.win.getkey() != 'r':
exit(1)
# Set current to first
current_id = 0
self.current_name = self.available[current_id]
self.apply(0)
return current_id
def curses_entry(win: curses.window) -> None:
# Curses initialization
curses.use_default_colors()
curses.init_pair(2, curses.COLOR_RED, -1)
curses.init_pair(3, curses.COLOR_GREEN, -1)
curses.init_pair(4, curses.COLOR_BLUE, -1)
curses.init_pair(5, curses.COLOR_YELLOW, -1)
Waller(win)
| en | 0.722596 | The system command which will return path of monitor's wallpaper * Depends on 'xfconf' Checks the file signature (magic number) for an image :param image_path: The path to the image :return: True if the image is PNG or JPG Collect connected monitors, via xRandr :return: Monitor names # Check monitors # Get currently used wallaper # Get 'LDM GTK greeter' wallpaper # Get available # Set permissions # finally: # config.save() # Inner Loop # Get current wallpaper # Random # DM background # Application The application function Sets proper permissions [400] for all the images and [404] for the DM background image :param avail: Available images :param ldm_bg_path: DM's background image path Collect available images in the specified directory :param current_dir: Current wallpapers directory :return: File-names :return: A Path to the current image of primary monitor :return: index of current image among other images in the same directory # Set current to first # Curses initialization | 2.335074 | 2 |
api/djangorestapp/admin.py | hca-foundation/urbangreenlab | 1 | 6632095 | from . import models
from django.contrib.admin import AdminSite
from django.template.response import TemplateResponse
from django.http import HttpResponse
from django.urls import path
class MyAdminSite(AdminSite):
def get_urls(self):
urls = super().get_urls()
urls += [
path('my_view/', self.admin_view(self.my_view))
]
return urls
def my_view(self, request):
print(request.user, "*********************")
context = dict(
self.each_context(request),
title=('Twsdfsrsr'),
app_path=None,
username=request.user.get_username(),
)
return TemplateResponse(request, "sometemplate.html", context)
admin_site = MyAdminSite(name='myadmin')
admin_site.site_url = None
# Register your models here.
admin_site.register(models.EventQuiz)
admin_site.register(models.Event)
admin_site.register(models.QuestionBankAnswer)
admin_site.register(models.QuestionBank)
admin_site.register(models.QuizBank)
admin_site.register(models.QuizQuestion)
admin_site.register(models.QuizTaker)
| from . import models
from django.contrib.admin import AdminSite
from django.template.response import TemplateResponse
from django.http import HttpResponse
from django.urls import path
class MyAdminSite(AdminSite):
def get_urls(self):
urls = super().get_urls()
urls += [
path('my_view/', self.admin_view(self.my_view))
]
return urls
def my_view(self, request):
print(request.user, "*********************")
context = dict(
self.each_context(request),
title=('Twsdfsrsr'),
app_path=None,
username=request.user.get_username(),
)
return TemplateResponse(request, "sometemplate.html", context)
admin_site = MyAdminSite(name='myadmin')
admin_site.site_url = None
# Register your models here.
admin_site.register(models.EventQuiz)
admin_site.register(models.Event)
admin_site.register(models.QuestionBankAnswer)
admin_site.register(models.QuestionBank)
admin_site.register(models.QuizBank)
admin_site.register(models.QuizQuestion)
admin_site.register(models.QuizTaker)
| en | 0.968259 | # Register your models here. | 2.205112 | 2 |
diplomacy/tests/mila_actions_test.py | maayanorner/diplomacy | 0 | 6632096 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the action conversions defined in mila_actions.py."""
import collections
from absl.testing import absltest
from deepmind.diplomacy.environment import action_list
from deepmind.diplomacy.environment import action_utils
from deepmind.diplomacy.environment import human_readable_actions
from deepmind.diplomacy.environment import mila_actions
class MilaActionsTest(absltest.TestCase):
def test_inversion_dm_actions(self):
"""Tests converting a DM to MILA to DM action recovers original action."""
for original_action in action_list.POSSIBLE_ACTIONS:
possible_mila_actions = mila_actions.action_to_mila_actions(
original_action)
for mila_action in possible_mila_actions:
self.assertIn(
original_action,
mila_actions.mila_action_to_possible_actions(mila_action),
f'{mila_action} does not map to set including dm action '
f'{human_readable_actions.action_string(original_action, None)}')
def test_inversion_mila_actions(self):
"""Tests converting a MILA to DM to MILA action recovers original action."""
for original_action in action_list.MILA_ACTIONS_LIST:
possible_dm_actions = mila_actions.mila_action_to_possible_actions(
original_action)
for dm_action in possible_dm_actions:
self.assertIn(
original_action,
mila_actions.action_to_mila_actions(dm_action),
f'{human_readable_actions.action_string(dm_action, None)} '
f'does not map to set including mila action {original_action}')
def test_all_mila_actions_have_dm_action(self):
for mila_action in action_list.MILA_ACTIONS_LIST:
dm_actions = mila_actions.mila_action_to_possible_actions(mila_action)
self.assertNotEmpty(dm_actions,
f'mila_action {mila_action} has no dm_action')
def test_only_disband_remove_ambiguous_mila_actions(self):
for mila_action in action_list.MILA_ACTIONS_LIST:
dm_actions = mila_actions.mila_action_to_possible_actions(mila_action)
if len(dm_actions) > 1:
self.assertLen(dm_actions, 2, f'{mila_action} gives >2 dm_actions')
orders = {action_utils.action_breakdown(dm_action)[0]
for dm_action in dm_actions}
self.assertEqual(
orders, {action_utils.REMOVE, action_utils.DISBAND},
f'{mila_action} ambiguous but not a disband/remove action')
def test_all_dm_actions_have_possible_mila_action_count(self):
"""DM actions correspond to possibly multiple MILA actions.
This is because they do not specify unit type or coast when it is possible
to infer from the board.
There are 1, 2 or 3 possible unit descriptions (for an army and/or a fleet
or two possible fleets in a bicoastal province) and up to 2 units specified
in an action. Furthermore, no action can involve two fleets in bicoastal
provinces, so the possible mila_action counts are 1, 2, 3, 4, or 6.
"""
for action in action_list.POSSIBLE_ACTIONS:
mila_actions_list = mila_actions.action_to_mila_actions(action)
self.assertIn(
len(mila_actions_list), {1, 2, 3, 4, 6},
f'action {action} gives {len(mila_actions_list)} '
'mila_actions, which cannot be correct')
def test_expected_number_missing_mila_actions(self):
"""Tests MILA actions misses no actions except known convoy-related ones.
The Mila actions list does not allow long convoys, or include any convoy
actions that cannot affect the adjudication (e.g. ADR C ALB-TUN)
We test that these explain every situation where the actions we make are not in
action_list.MILA_ACTIONS_LIST.
"""
mila_actions_to_dm_actions = collections.defaultdict(list)
long_convoys = set()
for action in action_list.POSSIBLE_ACTIONS:
mila_action_list = mila_actions.action_to_mila_actions(action)
for mila_action in mila_action_list:
mila_actions_to_dm_actions[mila_action].append(action)
if mila_action not in action_list.MILA_ACTIONS_LIST:
order, p1, p2, p3 = action_utils.action_breakdown(action)
if order == action_utils.CONVOY_TO:
long_convoys.add((p1, p2))
reasons_for_illegal_mila_action = {
'Long convoy to': 0,
'Long convoy': 0,
'Other convoy': 0,
'Support long convoy to': 0,
'Support alternative convoy too long': 0,
'Unknown': 0,
}
for mila_action in mila_actions_to_dm_actions:
if mila_action not in action_list.MILA_ACTIONS_LIST:
deepmind_action = mila_actions_to_dm_actions[mila_action][0]
order, p1, p2, p3 = action_utils.action_breakdown(deepmind_action)
if order == action_utils.CONVOY_TO:
# Manually checked that all of these are just long convoys (and
# otherwise are well formatted actions)
reasons_for_illegal_mila_action['Long convoy to'] += 1
elif order == action_utils.CONVOY:
if (p3, p2) in long_convoys:
reasons_for_illegal_mila_action['Long convoy'] += 1
continue
else:
# Manually checked, these are all well formatted.
# They are irrelevant convoys, e.g. ADR C ALB-TUN
# or they are relevant but only on long routes,
# e.g. F IRI C StP-LVP, which is only relevant on the long route
# BAR-NWG-NTH-ECH-IRI
reasons_for_illegal_mila_action['Other convoy'] += 1
continue
elif order == action_utils.SUPPORT_MOVE_TO:
if (p3, p2) in long_convoys:
reasons_for_illegal_mila_action['Support long convoy to'] += 1
else:
# These have all been checked manually. What's happening is
# something like F NAO S A StP - LVP. Mila's convoy rules mean that
# the only way they allow this convoy is if NAO is part of the
# route. The original game allows a longer convoy route, and so the
# support is valid
reasons_for_illegal_mila_action[
'Support alternative convoy too long'] += 1
else:
reasons_for_illegal_mila_action['Unknown'] += 1
expected_counts = {'Long convoy to': 374,
'Long convoy': 4238,
'Other convoy': 2176,
'Support long convoy to': 2565,
'Support alternative convoy too long': 27,
'Unknown': 0}
self.assertEqual(reasons_for_illegal_mila_action, expected_counts,
'unexpected number of actions not in MILA list')
if __name__ == '__main__':
absltest.main()
| # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the action conversions defined in mila_actions.py."""
import collections
from absl.testing import absltest
from deepmind.diplomacy.environment import action_list
from deepmind.diplomacy.environment import action_utils
from deepmind.diplomacy.environment import human_readable_actions
from deepmind.diplomacy.environment import mila_actions
class MilaActionsTest(absltest.TestCase):
def test_inversion_dm_actions(self):
"""Tests converting a DM to MILA to DM action recovers original action."""
for original_action in action_list.POSSIBLE_ACTIONS:
possible_mila_actions = mila_actions.action_to_mila_actions(
original_action)
for mila_action in possible_mila_actions:
self.assertIn(
original_action,
mila_actions.mila_action_to_possible_actions(mila_action),
f'{mila_action} does not map to set including dm action '
f'{human_readable_actions.action_string(original_action, None)}')
def test_inversion_mila_actions(self):
"""Tests converting a MILA to DM to MILA action recovers original action."""
for original_action in action_list.MILA_ACTIONS_LIST:
possible_dm_actions = mila_actions.mila_action_to_possible_actions(
original_action)
for dm_action in possible_dm_actions:
self.assertIn(
original_action,
mila_actions.action_to_mila_actions(dm_action),
f'{human_readable_actions.action_string(dm_action, None)} '
f'does not map to set including mila action {original_action}')
def test_all_mila_actions_have_dm_action(self):
for mila_action in action_list.MILA_ACTIONS_LIST:
dm_actions = mila_actions.mila_action_to_possible_actions(mila_action)
self.assertNotEmpty(dm_actions,
f'mila_action {mila_action} has no dm_action')
def test_only_disband_remove_ambiguous_mila_actions(self):
for mila_action in action_list.MILA_ACTIONS_LIST:
dm_actions = mila_actions.mila_action_to_possible_actions(mila_action)
if len(dm_actions) > 1:
self.assertLen(dm_actions, 2, f'{mila_action} gives >2 dm_actions')
orders = {action_utils.action_breakdown(dm_action)[0]
for dm_action in dm_actions}
self.assertEqual(
orders, {action_utils.REMOVE, action_utils.DISBAND},
f'{mila_action} ambiguous but not a disband/remove action')
def test_all_dm_actions_have_possible_mila_action_count(self):
"""DM actions correspond to possibly multiple MILA actions.
This is because they do not specify unit type or coast when it is possible
to infer from the board.
There are 1, 2 or 3 possible unit descriptions (for an army and/or a fleet
or two possible fleets in a bicoastal province) and up to 2 units specified
in an action. Furthermore, no action can involve two fleets in bicoastal
provinces, so the possible mila_action counts are 1, 2, 3, 4, or 6.
"""
for action in action_list.POSSIBLE_ACTIONS:
mila_actions_list = mila_actions.action_to_mila_actions(action)
self.assertIn(
len(mila_actions_list), {1, 2, 3, 4, 6},
f'action {action} gives {len(mila_actions_list)} '
'mila_actions, which cannot be correct')
def test_expected_number_missing_mila_actions(self):
"""Tests MILA actions misses no actions except known convoy-related ones.
The Mila actions list does not allow long convoys, or include any convoy
actions that cannot affect the adjudication (e.g. ADR C ALB-TUN)
We test that these explain every situation where the actions we make are not in
action_list.MILA_ACTIONS_LIST.
"""
mila_actions_to_dm_actions = collections.defaultdict(list)
long_convoys = set()
for action in action_list.POSSIBLE_ACTIONS:
mila_action_list = mila_actions.action_to_mila_actions(action)
for mila_action in mila_action_list:
mila_actions_to_dm_actions[mila_action].append(action)
if mila_action not in action_list.MILA_ACTIONS_LIST:
order, p1, p2, p3 = action_utils.action_breakdown(action)
if order == action_utils.CONVOY_TO:
long_convoys.add((p1, p2))
reasons_for_illegal_mila_action = {
'Long convoy to': 0,
'Long convoy': 0,
'Other convoy': 0,
'Support long convoy to': 0,
'Support alternative convoy too long': 0,
'Unknown': 0,
}
for mila_action in mila_actions_to_dm_actions:
if mila_action not in action_list.MILA_ACTIONS_LIST:
deepmind_action = mila_actions_to_dm_actions[mila_action][0]
order, p1, p2, p3 = action_utils.action_breakdown(deepmind_action)
if order == action_utils.CONVOY_TO:
# Manually checked that all of these are just long convoys (and
# otherwise are well formatted actions)
reasons_for_illegal_mila_action['Long convoy to'] += 1
elif order == action_utils.CONVOY:
if (p3, p2) in long_convoys:
reasons_for_illegal_mila_action['Long convoy'] += 1
continue
else:
# Manually checked, these are all well formatted.
# They are irrelevant convoys, e.g. ADR C ALB-TUN
# or they are relevant but only on long routes,
# e.g. F IRI C StP-LVP, which is only relevant on the long route
# BAR-NWG-NTH-ECH-IRI
reasons_for_illegal_mila_action['Other convoy'] += 1
continue
elif order == action_utils.SUPPORT_MOVE_TO:
if (p3, p2) in long_convoys:
reasons_for_illegal_mila_action['Support long convoy to'] += 1
else:
# These have all been checked manually. What's happening is
# something like F NAO S A StP - LVP. Mila's convoy rules mean that
# the only way they allow this convoy is if NAO is part of the
# route. The original game allows a longer convoy route, and so the
# support is valid
reasons_for_illegal_mila_action[
'Support alternative convoy too long'] += 1
else:
reasons_for_illegal_mila_action['Unknown'] += 1
expected_counts = {'Long convoy to': 374,
'Long convoy': 4238,
'Other convoy': 2176,
'Support long convoy to': 2565,
'Support alternative convoy too long': 27,
'Unknown': 0}
self.assertEqual(reasons_for_illegal_mila_action, expected_counts,
'unexpected number of actions not in MILA list')
if __name__ == '__main__':
absltest.main()
| en | 0.870705 | # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests the action conversions defined in mila_actions.py. Tests converting a DM to MILA to DM action recovers original action. Tests converting a MILA to DM to MILA action recovers original action. DM actions correspond to possibly multiple MILA actions. This is because they do not specify unit type or coast when it is possible to infer from the board. There are 1, 2 or 3 possible unit descriptions (for an army and/or a fleet or two possible fleets in a bicoastal province) and up to 2 units specified in an action. Furthermore, no action can involve two fleets in bicoastal provinces, so the possible mila_action counts are 1, 2, 3, 4, or 6. Tests MILA actions misses no actions except known convoy-related ones. The Mila actions list does not allow long convoys, or include any convoy actions that cannot affect the adjudication (e.g. ADR C ALB-TUN) We test these explain every situation where the actions we make are not in action_list.MILA_ACTIONS_LIST. # Manually checked that all of these are just long convoys (and # otherwise are well formatted actions) # Manually checked, these are all well formatted. # They are irrelevant convoys, e.g. ADR C ALB-TUN # or they are relevant but only on long routes, # e.g. F IRI C StP-LVP, which is only relevant on the long route # BAR-NWG-NTH-ECH-IRI # These have all been checked manually. What's happening is # something like F NAO S A StP - LVP. Mila's convoy rules mean that # the only way they allow this convoy is if NAO is part of the # route. The original game allows a longer convoy route, and so the # support is valid | 2.320423 | 2 |
jupyter_kernel_test/__init__.py | tgb417/jupyter_kernel_test | 0 | 6632097 | """Machinery for testing Jupyter kernels via the messaging protocol.
"""
from unittest import TestCase, SkipTest
from queue import Empty
from jupyter_client.manager import start_new_kernel
from .messagespec import validate_message, MimeBundle
TIMEOUT = 15
__version__ = '0.3'
class KernelTests(TestCase):
kernel_name = ""
@classmethod
def setUpClass(cls):
cls.km, cls.kc = start_new_kernel(kernel_name=cls.kernel_name)
@classmethod
def tearDownClass(cls):
cls.kc.stop_channels()
cls.km.shutdown_kernel()
def flush_channels(self):
for channel in (self.kc.shell_channel, self.kc.iopub_channel):
while True:
try:
msg = channel.get_msg(block=True, timeout=0.1)
except Empty:
break
else:
validate_message(msg)
language_name = ""
file_extension = ""
def test_kernel_info(self):
self.flush_channels()
msg_id = self.kc.kernel_info()
reply = self.kc.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'kernel_info_reply', msg_id)
if self.language_name:
self.assertEqual(reply['content']['language_info']['name'],
self.language_name)
if self.file_extension:
self.assertEqual(reply['content']['language_info']['file_extension'],
self.file_extension)
self.assertTrue(reply['content']['language_info']['file_extension'].startswith("."))
def execute_helper(self, code, timeout=TIMEOUT,
silent=False, store_history=True,
stop_on_error=True):
msg_id = self.kc.execute(code=code, silent=silent,
store_history=store_history,
stop_on_error=stop_on_error)
reply = self.kc.get_shell_msg(timeout=timeout)
validate_message(reply, 'execute_reply', msg_id)
busy_msg = self.kc.iopub_channel.get_msg(timeout=1)
validate_message(busy_msg, 'status', msg_id)
self.assertEqual(busy_msg['content']['execution_state'], 'busy')
output_msgs = []
while True:
msg = self.kc.iopub_channel.get_msg(timeout=0.1)
validate_message(msg, msg['msg_type'], msg_id)
if msg['msg_type'] == 'status':
self.assertEqual(msg['content']['execution_state'], 'idle')
break
elif msg['msg_type'] == 'execute_input':
self.assertEqual(msg['content']['code'], code)
continue
output_msgs.append(msg)
return reply, output_msgs
code_hello_world = ""
def test_execute_stdout(self):
if not self.code_hello_world:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(code=self.code_hello_world)
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
for msg in output_msgs:
if (msg['msg_type'] == 'stream') and (msg['content']['name'] == 'stdout'):
self.assertIn('hello, world', msg['content']['text'])
break
else:
self.assertTrue(False, "Expected one output message of type 'stream' and 'content.name'='stdout'")
code_stderr = ""
def test_execute_stderr(self):
if not self.code_stderr:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(code=self.code_stderr)
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
for msg in output_msgs:
if (msg['msg_type'] == 'stream') and (msg['content']['name'] == 'stderr'):
break
else:
self.assertTrue(False, "Expected one output message of type 'stream' and 'content.name'='stderr'")
completion_samples = []
def test_completion(self):
if not self.completion_samples:
raise SkipTest
for sample in self.completion_samples:
with self.subTest(text=sample['text']):
msg_id = self.kc.complete(sample['text'])
reply = self.kc.get_shell_msg()
validate_message(reply, 'complete_reply', msg_id)
if 'matches' in sample:
self.assertEqual(set(reply['content']['matches']),
set(sample['matches']))
complete_code_samples = []
incomplete_code_samples = []
invalid_code_samples = []
def check_is_complete(self, sample, status):
msg_id = self.kc.is_complete(sample)
reply = self.kc.get_shell_msg()
validate_message(reply, 'is_complete_reply', msg_id)
if reply['content']['status'] != status:
msg = "For code sample\n {!r}\nExpected {!r}, got {!r}."
raise AssertionError(msg.format(sample, status,
reply['content']['status']))
def test_is_complete(self):
if not (self.complete_code_samples
or self.incomplete_code_samples
or self.invalid_code_samples):
raise SkipTest
self.flush_channels()
with self.subTest(status="complete"):
for sample in self.complete_code_samples:
self.check_is_complete(sample, 'complete')
with self.subTest(status="incomplete"):
for sample in self.incomplete_code_samples:
self.check_is_complete(sample, 'incomplete')
with self.subTest(status="invalid"):
for sample in self.invalid_code_samples:
self.check_is_complete(sample, 'invalid')
code_page_something = ""
def test_pager(self):
if not self.code_page_something:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(self.code_page_something)
self.assertEqual(reply['content']['status'], 'ok')
payloads = reply['content']['payload']
self.assertEqual(len(payloads), 1)
self.assertEqual(payloads[0]['source'], 'page')
mimebundle = payloads[0]['data']
# Validate the mimebundle
MimeBundle().data = mimebundle
self.assertIn('text/plain', mimebundle)
code_generate_error = ""
def test_error(self):
if not self.code_generate_error:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(self.code_generate_error)
self.assertEqual(reply['content']['status'], 'error')
self.assertEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'error')
code_execute_result = []
def test_execute_result(self):
if not self.code_execute_result:
raise SkipTest
for sample in self.code_execute_result:
with self.subTest(code=sample['code']):
self.flush_channels()
reply, output_msgs = self.execute_helper(sample['code'])
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'execute_result')
self.assertIn('text/plain', output_msgs[0]['content']['data'])
self.assertEqual(output_msgs[0]['content']['data']['text/plain'],
sample['result'])
code_display_data = []
def test_display_data(self):
if not self.code_display_data:
raise SkipTest
for sample in self.code_display_data:
with self.subTest(code=sample['code']):
self.flush_channels()
reply, output_msgs = self.execute_helper(sample['code'])
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'display_data')
self.assertIn(sample['mime'], output_msgs[0]['content']['data'])
# this should match one of the values in code_execute_result
code_history_pattern = ""
supported_history_operations = ()
def history_helper(self, execute_first, timeout=TIMEOUT, **histargs):
self.flush_channels()
for code in execute_first:
reply, output_msgs = self.execute_helper(code)
self.flush_channels()
msg_id = self.kc.history(**histargs)
reply = self.kc.get_shell_msg(timeout=timeout)
validate_message(reply, 'history_reply', msg_id)
return reply
def test_history(self):
if not self.code_execute_result:
raise SkipTest
codes = [s['code'] for s in self.code_execute_result]
results = [s['result'] for s in self.code_execute_result]
n = len(codes)
session = start = None
with self.subTest(hist_access_type="tail"):
if 'tail' not in self.supported_history_operations:
raise SkipTest
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="tail", n=n)
self.assertEqual(len(reply['content']['history']), n)
self.assertEqual(len(reply['content']['history'][0]), 3)
self.assertEqual(codes, [h[2] for h in reply['content']['history']])
session, start = reply['content']['history'][0][0:2]
with self.subTest(output=True):
reply = self.history_helper(codes, output=True, raw=True,
hist_access_type="tail", n=n)
self.assertEqual(len(reply['content']['history'][0][2]), 2)
with self.subTest(hist_access_type="range"):
if 'range' not in self.supported_history_operations:
raise SkipTest
if session is None:
raise SkipTest
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="range",
session=session, start=start,
stop=start+1)
self.assertEqual(len(reply['content']['history']), 1)
self.assertEqual(reply['content']['history'][0][0], session)
self.assertEqual(reply['content']['history'][0][1], start)
with self.subTest(hist_access_type="search"):
if not self.code_history_pattern:
raise SkipTest
if 'search' not in self.supported_history_operations:
raise SkipTest
with self.subTest(subsearch="normal"):
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="search",
pattern=self.code_history_pattern)
self.assertGreaterEqual(len(reply['content']['history']), 1)
with self.subTest(subsearch="unique"):
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="search",
pattern=self.code_history_pattern,
unique=True)
self.assertEqual(len(reply['content']['history']), 1)
with self.subTest(subsearch="n"):
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="search",
pattern=self.code_history_pattern,
n=3)
self.assertEqual(len(reply['content']['history']), 3)
code_inspect_sample = ""
def test_inspect(self):
if not self.code_inspect_sample:
raise SkipTest
self.flush_channels()
msg_id = self.kc.inspect(self.code_inspect_sample)
reply = self.kc.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
self.assertEqual(reply['content']['status'], 'ok')
self.assertTrue(reply['content']['found'])
self.assertGreaterEqual(len(reply['content']['data']), 1)
code_clear_output = ""
def test_clear_output(self):
if not self.code_clear_output:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(code=self.code_clear_output)
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'clear_output')
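# ---------------------------------------------------------------------------
# Editor's addition: a minimal, illustrative sketch of how this machinery is
# used.  A concrete test module subclasses KernelTests and fills in the class
# attributes defined above.  The kernel name and code snippets below are
# assumptions (they presume a standard IPython "python3" kernelspec), not part
# of this package's API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import unittest

    class DefaultPythonKernelTests(KernelTests):
        # launch the registered "python3" kernelspec (ships with ipykernel)
        kernel_name = "python3"
        language_name = "python"
        file_extension = ".py"
        # exercised by test_execute_stdout
        code_hello_world = "print('hello, world')"
        # exercised by test_execute_result and test_history
        code_execute_result = [{"code": "1 + 2", "result": "3"}]

    unittest.main()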
| """Machinery for testing Jupyter kernels via the messaging protocol.
"""
from unittest import TestCase, SkipTest
from queue import Empty
from jupyter_client.manager import start_new_kernel
from .messagespec import validate_message, MimeBundle
TIMEOUT = 15
__version__ = '0.3'
class KernelTests(TestCase):
kernel_name = ""
@classmethod
def setUpClass(cls):
cls.km, cls.kc = start_new_kernel(kernel_name=cls.kernel_name)
@classmethod
def tearDownClass(cls):
cls.kc.stop_channels()
cls.km.shutdown_kernel()
def flush_channels(self):
for channel in (self.kc.shell_channel, self.kc.iopub_channel):
while True:
try:
msg = channel.get_msg(block=True, timeout=0.1)
except Empty:
break
else:
validate_message(msg)
language_name = ""
file_extension = ""
def test_kernel_info(self):
self.flush_channels()
msg_id = self.kc.kernel_info()
reply = self.kc.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'kernel_info_reply', msg_id)
if self.language_name:
self.assertEqual(reply['content']['language_info']['name'],
self.language_name)
if self.file_extension:
self.assertEqual(reply['content']['language_info']['file_extension'],
self.file_extension)
self.assertTrue(reply['content']['language_info']['file_extension'].startswith("."))
def execute_helper(self, code, timeout=TIMEOUT,
silent=False, store_history=True,
stop_on_error=True):
msg_id = self.kc.execute(code=code, silent=silent,
store_history=store_history,
stop_on_error=stop_on_error)
reply = self.kc.get_shell_msg(timeout=timeout)
validate_message(reply, 'execute_reply', msg_id)
busy_msg = self.kc.iopub_channel.get_msg(timeout=1)
validate_message(busy_msg, 'status', msg_id)
self.assertEqual(busy_msg['content']['execution_state'], 'busy')
output_msgs = []
while True:
msg = self.kc.iopub_channel.get_msg(timeout=0.1)
validate_message(msg, msg['msg_type'], msg_id)
if msg['msg_type'] == 'status':
self.assertEqual(msg['content']['execution_state'], 'idle')
break
elif msg['msg_type'] == 'execute_input':
self.assertEqual(msg['content']['code'], code)
continue
output_msgs.append(msg)
return reply, output_msgs
code_hello_world = ""
def test_execute_stdout(self):
if not self.code_hello_world:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(code=self.code_hello_world)
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
for msg in output_msgs:
if (msg['msg_type'] == 'stream') and (msg['content']['name'] == 'stdout'):
self.assertIn('hello, world', msg['content']['text'])
break
else:
self.assertTrue(False, "Expected one output message of type 'stream' and 'content.name'='stdout'")
code_stderr = ""
def test_execute_stderr(self):
if not self.code_stderr:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(code=self.code_stderr)
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
for msg in output_msgs:
if (msg['msg_type'] == 'stream') and (msg['content']['name'] == 'stderr'):
break
else:
self.assertTrue(False, "Expected one output message of type 'stream' and 'content.name'='stderr'")
completion_samples = []
def test_completion(self):
if not self.completion_samples:
raise SkipTest
for sample in self.completion_samples:
with self.subTest(text=sample['text']):
msg_id = self.kc.complete(sample['text'])
reply = self.kc.get_shell_msg()
validate_message(reply, 'complete_reply', msg_id)
if 'matches' in sample:
self.assertEqual(set(reply['content']['matches']),
set(sample['matches']))
complete_code_samples = []
incomplete_code_samples = []
invalid_code_samples = []
def check_is_complete(self, sample, status):
msg_id = self.kc.is_complete(sample)
reply = self.kc.get_shell_msg()
validate_message(reply, 'is_complete_reply', msg_id)
if reply['content']['status'] != status:
msg = "For code sample\n {!r}\nExpected {!r}, got {!r}."
raise AssertionError(msg.format(sample, status,
reply['content']['status']))
def test_is_complete(self):
if not (self.complete_code_samples
or self.incomplete_code_samples
or self.invalid_code_samples):
raise SkipTest
self.flush_channels()
with self.subTest(status="complete"):
for sample in self.complete_code_samples:
self.check_is_complete(sample, 'complete')
with self.subTest(status="incomplete"):
for sample in self.incomplete_code_samples:
self.check_is_complete(sample, 'incomplete')
with self.subTest(status="invalid"):
for sample in self.invalid_code_samples:
self.check_is_complete(sample, 'invalid')
code_page_something = ""
def test_pager(self):
if not self.code_page_something:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(self.code_page_something)
self.assertEqual(reply['content']['status'], 'ok')
payloads = reply['content']['payload']
self.assertEqual(len(payloads), 1)
self.assertEqual(payloads[0]['source'], 'page')
mimebundle = payloads[0]['data']
# Validate the mimebundle
MimeBundle().data = mimebundle
self.assertIn('text/plain', mimebundle)
code_generate_error = ""
def test_error(self):
if not self.code_generate_error:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(self.code_generate_error)
self.assertEqual(reply['content']['status'], 'error')
self.assertEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'error')
code_execute_result = []
def test_execute_result(self):
if not self.code_execute_result:
raise SkipTest
for sample in self.code_execute_result:
with self.subTest(code=sample['code']):
self.flush_channels()
reply, output_msgs = self.execute_helper(sample['code'])
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'execute_result')
self.assertIn('text/plain', output_msgs[0]['content']['data'])
self.assertEqual(output_msgs[0]['content']['data']['text/plain'],
sample['result'])
code_display_data = []
def test_display_data(self):
if not self.code_display_data:
raise SkipTest
for sample in self.code_display_data:
with self.subTest(code=sample['code']):
self.flush_channels()
reply, output_msgs = self.execute_helper(sample['code'])
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'display_data')
self.assertIn(sample['mime'], output_msgs[0]['content']['data'])
# this should match one of the values in code_execute_result
code_history_pattern = ""
supported_history_operations = ()
def history_helper(self, execute_first, timeout=TIMEOUT, **histargs):
self.flush_channels()
for code in execute_first:
reply, output_msgs = self.execute_helper(code)
self.flush_channels()
msg_id = self.kc.history(**histargs)
reply = self.kc.get_shell_msg(timeout=timeout)
validate_message(reply, 'history_reply', msg_id)
return reply
def test_history(self):
if not self.code_execute_result:
raise SkipTest
codes = [s['code'] for s in self.code_execute_result]
results = [s['result'] for s in self.code_execute_result]
n = len(codes)
session = start = None
with self.subTest(hist_access_type="tail"):
if 'tail' not in self.supported_history_operations:
raise SkipTest
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="tail", n=n)
self.assertEqual(len(reply['content']['history']), n)
self.assertEqual(len(reply['content']['history'][0]), 3)
self.assertEqual(codes, [h[2] for h in reply['content']['history']])
session, start = reply['content']['history'][0][0:2]
with self.subTest(output=True):
reply = self.history_helper(codes, output=True, raw=True,
hist_access_type="tail", n=n)
self.assertEqual(len(reply['content']['history'][0][2]), 2)
with self.subTest(hist_access_type="range"):
if 'range' not in self.supported_history_operations:
raise SkipTest
if session is None:
raise SkipTest
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="range",
session=session, start=start,
stop=start+1)
self.assertEqual(len(reply['content']['history']), 1)
self.assertEqual(reply['content']['history'][0][0], session)
self.assertEqual(reply['content']['history'][0][1], start)
with self.subTest(hist_access_type="search"):
if not self.code_history_pattern:
raise SkipTest
if 'search' not in self.supported_history_operations:
raise SkipTest
with self.subTest(subsearch="normal"):
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="search",
pattern=self.code_history_pattern)
self.assertGreaterEqual(len(reply['content']['history']), 1)
with self.subTest(subsearch="unique"):
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="search",
pattern=self.code_history_pattern,
unique=True)
self.assertEqual(len(reply['content']['history']), 1)
with self.subTest(subsearch="n"):
reply = self.history_helper(codes, output=False, raw=True,
hist_access_type="search",
pattern=self.code_history_pattern,
n=3)
self.assertEqual(len(reply['content']['history']), 3)
code_inspect_sample = ""
def test_inspect(self):
if not self.code_inspect_sample:
raise SkipTest
self.flush_channels()
msg_id = self.kc.inspect(self.code_inspect_sample)
reply = self.kc.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
self.assertEqual(reply['content']['status'], 'ok')
self.assertTrue(reply['content']['found'])
self.assertGreaterEqual(len(reply['content']['data']), 1)
code_clear_output = ""
def test_clear_output(self):
if not self.code_clear_output:
raise SkipTest
self.flush_channels()
reply, output_msgs = self.execute_helper(code=self.code_clear_output)
self.assertEqual(reply['content']['status'], 'ok')
self.assertGreaterEqual(len(output_msgs), 1)
self.assertEqual(output_msgs[0]['msg_type'], 'clear_output')
| en | 0.56834 | Machinery for testing Jupyter kernels via the messaging protocol. # Validate the mimebundle # this should match one of the values in code_execute_result | 2.377688 | 2 |
utils/swift_build_support/tests/test_targets.py | EBGToo/swift | 0 | 6632098 | # test_targets.py - Unit tests for swift_build_support.targets -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import unittest
from swift_build_support.targets import StdlibDeploymentTarget
class HostTargetTestCase(unittest.TestCase):
def test_is_not_none_on_this_platform(self):
self.assertIsNotNone(StdlibDeploymentTarget.host_target())
class PlatformTargetsTestCase(unittest.TestCase):
def test_platform_contains(self):
"""
Checks that Platform.contains(target_name)
matches all of its targets' names and rejects non-matching names.
"""
# Pick a few platforms with lots of targets
for platform in [StdlibDeploymentTarget.Linux,
StdlibDeploymentTarget.iOS,
StdlibDeploymentTarget.iOSSimulator]:
for target in platform.targets:
self.assertTrue(platform.contains(target.name))
self.assertFalse(platform.contains("fakeCPU-MSDOS"))
self.assertFalse(platform.contains("singleTransistor-fakeOS"))
if __name__ == '__main__':
unittest.main()
| # test_targets.py - Unit tests for swift_build_support.targets -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import unittest
from swift_build_support.targets import StdlibDeploymentTarget
class HostTargetTestCase(unittest.TestCase):
def test_is_not_none_on_this_platform(self):
self.assertIsNotNone(StdlibDeploymentTarget.host_target())
class PlatformTargetsTestCase(unittest.TestCase):
def test_platform_contains(self):
"""
Checks that Platform.contains(target_name)
matches all of its targets' names and rejects non-matching names.
"""
# Pick a few platforms with lots of targets
for platform in [StdlibDeploymentTarget.Linux,
StdlibDeploymentTarget.iOS,
StdlibDeploymentTarget.iOSSimulator]:
for target in platform.targets:
self.assertTrue(platform.contains(target.name))
self.assertFalse(platform.contains("fakeCPU-MSDOS"))
self.assertFalse(platform.contains("singleTransistor-fakeOS"))
if __name__ == '__main__':
unittest.main()
| en | 0.773155 | # test_targets.py - Unit tests for swift_build_support.targets -*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors Checks that Platform.contains(target_name) matches all of its targets' names and rejects non-matching names. # Pick a few platforms with lots of targets | 2.507907 | 3 |
setup.py | kgermain/Qt.py | 800 | 6632099 | <filename>setup.py
import os
from setuptools import setup
os.environ["QT_PREFERRED_BINDING"] = "None"
version = __import__("Qt").__version__
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"
]
setup(
name="Qt.py",
version=version,
description="Python 2 & 3 compatibility wrapper around all Qt bindings - "
"PySide, PySide2, PyQt4 and PyQt5.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/mottosso/Qt",
license="MIT",
zip_safe=False,
data_files=["LICENSE"],
py_modules=["Qt"],
classifiers=classifiers
)
| <filename>setup.py
import os
from setuptools import setup
os.environ["QT_PREFERRED_BINDING"] = "None"
version = __import__("Qt").__version__
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"
]
setup(
name="Qt.py",
version=version,
description="Python 2 & 3 compatibility wrapper around all Qt bindings - "
"PySide, PySide2, PyQt4 and PyQt5.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/mottosso/Qt",
license="MIT",
zip_safe=False,
data_files=["LICENSE"],
py_modules=["Qt"],
classifiers=classifiers
)
| none | 1 | 1.258557 | 1 |
|
benchmarks/imports.py | skshetry/dvc-bench | 20 | 6632100 | <filename>benchmarks/imports.py
from benchmarks.base import BaseBench, BaseRemoteBench
class ImportBench(BaseBench):
repeat = 1
timeout = 12000
def setup(self):
super().setup()
self.init_git()
self.init_dvc()
def time_imports(self):
repo = f"file://{self.project_dir}"
path = "data/cats_dogs"
self.dvc("import", repo, path, proc=True)
class ImportUrlBench(BaseRemoteBench):
def setup(self, remote):
super().setup(remote)
self.data_url = self.setup_data("100x1024")
def time_import_url(self, _):
self.dvc("import-url", self.data_url, proc=True)
class ImportUrlToRemoteBench(BaseRemoteBench):
def setup(self, remote):
super().setup(remote)
self.data_url = self.setup_data("100x1024")
def time_import_url_to_remote(self, _):
self.dvc("import-url", self.data_url, "--to-remote", proc=True)
| <filename>benchmarks/imports.py
from benchmarks.base import BaseBench, BaseRemoteBench
class ImportBench(BaseBench):
repeat = 1
timeout = 12000
def setup(self):
super().setup()
self.init_git()
self.init_dvc()
def time_imports(self):
repo = f"file://{self.project_dir}"
path = "data/cats_dogs"
self.dvc("import", repo, path, proc=True)
class ImportUrlBench(BaseRemoteBench):
def setup(self, remote):
super().setup(remote)
self.data_url = self.setup_data("100x1024")
def time_import_url(self, _):
self.dvc("import-url", self.data_url, proc=True)
class ImportUrlToRemoteBench(BaseRemoteBench):
def setup(self, remote):
super().setup(remote)
self.data_url = self.setup_data("100x1024")
def time_import_url_to_remote(self, _):
self.dvc("import-url", self.data_url, "--to-remote", proc=True)
| none | 1 | 2.363415 | 2 |
|
tool/pylib/misc/ExtMap.py | mever/qooxdoo | 1 | 6632101 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * <NAME> (thron7)
# * <NAME> (d_wagner)
#
################################################################################
import os, sys, re, types, string, copy
##
# Map class with path-like accessor
##
class ExtMap(object):
"Map class with path-like accessor"
def __init__(self, data=None):
if data:
assert isinstance(data, types.DictType)
else:
data = {}
self._data = data
def __getitem__(self, key):
if key in self._data:
return self._data[key]
data = self._data
splits = key.split('/')
for part in splits:
if part == "." or part == "":
pass
elif isinstance(data, types.DictType) and part in data:
data = data[part]
else:
raise KeyError, key
return data
def get(self, key, default=None, confmap=None):
"""Returns a (possibly nested) data element from dict
"""
if confmap:
data = confmap
else:
data = self
try:
return data.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
data = self._data
if key in data:
data[key] = value
else:
splits = key.split('/')
splitslen = len(splits)
for i in range(splitslen):
part = splits[i]
if part == "." or part == "":
pass
elif isinstance(data, types.DictionaryType):
if i == splitslen-1: # it's the last
data[part] = value
else:
if part not in data:
data[part] = {}
data = data[part]
else: # leaf type map value
raise ValueError("Cannot insert entry in non-dict data value: %r" % data)
return
def __delitem__(self, key):
data = self._data
if key in data:
del data[key]
else:
splits = key.split('/')
splitslen = len(splits)
for i in range(splitslen):
part = splits[i]
if part in (".", ""):
pass
elif isinstance(data, types.DictionaryType):
if i == splitslen-1: # it's the last
del data[part]
else:
if part not in data:
return # nothing to delete
else:
data = data[part]
else: # the given key doesn't lead to a map
raise ValueError("Cannot delete from non-dict data value: %s" % "/".join(splits[:i+1]))
def delete(self, key):
self.__delitem__(key)
def set(self, key, value):
"""Sets a (possibly nested) data element in the dict
"""
return self.__setitem__(key, value)
##
# Rename a map key.
def rename(self, key, newkey):
val = self.get(key)
self.set(newkey, val)
self.delete(key)
return
def __contains__(self, item):
try:
self.__getitem__(item)
return True
except KeyError:
return False
def extract(self, key):
return ExtMap(self.get(key, {}))
def getData(self):
return self._data
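## ---------------------------------------------------------------------------
## Editor's addition: illustrative usage sketch, not part of the original
## qooxdoo module.  The sample data and keys are made up; they only show how
## the path-like accessor is meant to be used.
## ---------------------------------------------------------------------------
if __name__ == "__main__":
    conf = ExtMap({"jobs": {"build": {"cache": "/tmp/cache"}}})
    print(conf.get("jobs/build/cache"))        # -> /tmp/cache
    conf.set("jobs/build/log/level", "debug")  # intermediate dicts are created
    print("jobs/build/log/level" in conf)      # -> True
    conf.rename("jobs/build/cache", "jobs/build/cache-path")
    print(conf.extract("jobs/build").getData())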
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * <NAME> (thron7)
# * <NAME> (d_wagner)
#
################################################################################
import os, sys, re, types, string, copy
##
# Map class with path-like accessor
##
class ExtMap(object):
"Map class with path-like accessor"
def __init__(self, data=None):
if data:
assert isinstance(data, types.DictType)
else:
data = {}
self._data = data
def __getitem__(self, key):
if key in self._data:
return self._data[key]
data = self._data
splits = key.split('/')
for part in splits:
if part == "." or part == "":
pass
elif isinstance(data, types.DictType) and part in data:
data = data[part]
else:
raise KeyError, key
return data
def get(self, key, default=None, confmap=None):
"""Returns a (possibly nested) data element from dict
"""
if confmap:
data = confmap
else:
data = self
try:
return data.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
data = self._data
if key in data:
data[key] = value
else:
splits = key.split('/')
splitslen = len(splits)
for i in range(splitslen):
part = splits[i]
if part == "." or part == "":
pass
elif isinstance(data, types.DictionaryType):
if i == splitslen-1: # it's the last
data[part] = value
else:
if part not in data:
data[part] = {}
data = data[part]
else: # leaf type map value
raise ValueError("Cannot insert entry in non-dict data value: %r" % data)
return
def __delitem__(self, key):
data = self._data
if key in data:
del data[key]
else:
splits = key.split('/')
splitslen = len(splits)
for i in range(splitslen):
part = splits[i]
if part in (".", ""):
pass
elif isinstance(data, types.DictionaryType):
if i == splitslen-1: # it's the last
del data[part]
else:
if part not in data:
return # nothing to delete
else:
data = data[part]
else: # the given key doesn't lead to a map
raise ValueError("Cannot delete from non-dict data value: %s" % "/".join(splits[:i+1]))
def delete(self, key):
self.__delitem__(key)
def set(self, key, value):
"""Sets a (possibly nested) data element in the dict
"""
return self.__setitem__(key, value)
##
# Rename a map key.
def rename(self, key, newkey):
val = self.get(key)
self.set(newkey, val)
self.delete(key)
return
def __contains__(self, item):
try:
self.__getitem__(item)
return True
except KeyError:
return False
def extract(self, key):
return ExtMap(self.get(key, {}))
def getData(self):
return self._data | en | 0.566828 | #!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################################ # # qooxdoo - the new era of web development # # http://qooxdoo.org # # Copyright: # 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de # # License: # MIT: https://opensource.org/licenses/MIT # See the LICENSE file in the project's top-level directory for details. # # Authors: # * <NAME> (thron7) # * <NAME> (d_wagner) # ################################################################################ ## # Map class with path-like accessor ## Returns a (possibly nested) data element from dict # it's the last # leaf type map value # it's the last # nothing to delete # the given key doesn't lead to a map Sets a (possibly nested) data element in the dict ## # Rename a map key. | 2.320555 | 2 |
horovod/unet_training_horovod-60.py | explcre/SHUKUN-Technology-Co.-Ltd-Algorithm-intern | 1 | 6632102 | <gh_stars>1-10
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example shows how to execute distributed training based on Horovod APIs.
It can run on several nodes with multiple GPU devices on every node.
Main steps to set up the distributed training:
- Install Horovod referring to the guide: https://github.com/horovod/horovod/blob/master/docs/gpus.rst
If using MONAI docker, which already has NCCL and MPI, can quickly install Horovod with command:
`HOROVOD_NCCL_INCLUDE=/usr/include HOROVOD_NCCL_LIB=/usr/lib/x86_64-linux-gnu HOROVOD_GPU_OPERATIONS=NCCL \
pip install --no-cache-dir horovod`
- Set SSH permissions for root login without password at all nodes except master, referring to:
http://www.linuxproblem.org/art_9.html
- Run `hvd.init()` to initialize Horovod.
- Pin each GPU to a single process to avoid resource contention, use `hvd.local_rank()` to get GPU index.
And use `hvd.rank()` to get the overall rank index.
- Wrap Dataset with `DistributedSampler`, and disable the `shuffle` in DataLoader.
Instead, shuffle data by `train_sampler.set_epoch(epoch)` before every epoch.
- Wrap the optimizer in hvd.DistributedOptimizer. The distributed optimizer delegates gradient
computation to the original optimizer, averages gradients using allreduce or allgather,
and then applies those averaged gradients.
- Broadcast the initial variable states from rank 0 to all other processes.
Note:
Suggest setting exactly the same software environment for every node, especially `mpi`, `nccl`, etc.
A good practice is to use the same MONAI docker image for all nodes directly, if using docker, need
to set SSH permissions both at the node and in docker, referring to Horovod guide for more details:
https://github.com/horovod/horovod/blob/master/docs/docker.rst
Example script to execute this program, only need to run on the master node:
`horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"`
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3], [horovod 0.19.5].
Referring to: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
"""
import argparse
import os
import sys
from glob import glob
import horovod.torch as hvd
import nibabel as nib
import numpy as np
import torch
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
import monai
from monai.data import DataLoader, Dataset, create_test_image_3d
from monai.transforms import (
AsChannelFirstd,
Compose,
LoadImaged,
RandCropByPosNegLabeld,
RandRotate90d,
ScaleIntensityd,
ToTensord,
)
import time
#torch.backends.cudnn.enabled = False
def train(args):
time_start=time.time()#measure the time
# initialize Horovod library
hvd.init()
# Horovod limits CPU threads to be used per worker
torch.set_num_threads(1)
# disable logging for processes except 0 on every node
if hvd.local_rank() != 0:
f = open(os.devnull, "w")
sys.stdout = sys.stderr = f
elif not os.path.exists(args.dir):
# create 40 random image, mask paris on master node for training
print(f"generating synthetic data to {args.dir} (this may take a while)")
os.makedirs(args.dir)
# set random seed to generate same random data for every node
np.random.seed(seed=0)
for i in range(40):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))
images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
train_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]
# define transforms for image and segmentation
train_transforms = Compose(
[
LoadImaged(keys=["img", "seg"]),
AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
ScaleIntensityd(keys="img"),
RandCropByPosNegLabeld(
keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
),
RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
ToTensord(keys=["img", "seg"]),
]
)
# create a training data loader
train_ds = Dataset(data=train_files, transform=train_transforms)
# create a training data sampler
train_sampler = DistributedSampler(train_ds, num_replicas=hvd.size(), rank=hvd.rank())
# when supported, use "forkserver" to spawn dataloader workers instead of "fork" to prevent
# issues with Infiniband implementations that are not fork-safe
multiprocessing_context = None
if hasattr(mp, "_supports_context") and mp._supports_context and "forkserver" in mp.get_all_start_methods():
multiprocessing_context = "forkserver"
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
train_loader = DataLoader(
train_ds,
batch_size=2,
shuffle=False,
num_workers=2,
pin_memory=True,
sampler=train_sampler,
multiprocessing_context=multiprocessing_context,
)
# create UNet, DiceLoss and Adam optimizer
device = torch.device(f"cuda:{hvd.local_rank()}")
torch.cuda.set_device(device)
model = monai.networks.nets.UNet(
dimensions=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
loss_function = monai.losses.DiceLoss(sigmoid=True).to(device)
optimizer = torch.optim.Adam(model.parameters(), 1e-3)
# Horovod broadcasts parameters & optimizer state
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod wraps optimizer with DistributedOptimizer
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
# start a typical PyTorch training
epoch_loss_values = list()
EPOCH_SIZE=60
for epoch in range(EPOCH_SIZE):
print("-" * 10)
print(f"epoch {epoch + 1}/{EPOCH_SIZE}")
model.train()
epoch_loss = 0
step = 0
train_sampler.set_epoch(epoch)
for batch_data in train_loader:
step += 1
inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = len(train_ds) // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
print(f"train completed, epoch losses: {epoch_loss_values}")
if hvd.rank() == 0:
# all processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes,
# therefore, saving it in one process is sufficient
torch.save(model.state_dict(), "final_model.pth")
time_end=time.time()
print('totally time cost',time_end-time_start)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", default="./testdata", type=str, help="directory to create random data")
args = parser.parse_args()
train(args=args)
# Example script to execute this program only on the master node:
# horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"
if __name__ == "__main__":
main()
| # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example shows how to execute distributed training based on Horovod APIs.
It can run on several nodes with multiple GPU devices on every node.
Main steps to set up the distributed training:
- Install Horovod referring to the guide: https://github.com/horovod/horovod/blob/master/docs/gpus.rst
If using MONAI docker, which already has NCCL and MPI, can quickly install Horovod with command:
`HOROVOD_NCCL_INCLUDE=/usr/include HOROVOD_NCCL_LIB=/usr/lib/x86_64-linux-gnu HOROVOD_GPU_OPERATIONS=NCCL \
pip install --no-cache-dir horovod`
- Set SSH permissions for root login without password at all nodes except master, referring to:
http://www.linuxproblem.org/art_9.html
- Run `hvd.init()` to initialize Horovod.
- Pin each GPU to a single process to avoid resource contention, use `hvd.local_rank()` to get GPU index.
And use `hvd.rank()` to get the overall rank index.
- Wrap Dataset with `DistributedSampler`, and disable the `shuffle` in DataLoader.
Instead, shuffle data by `train_sampler.set_epoch(epoch)` before every epoch.
- Wrap the optimizer in hvd.DistributedOptimizer. The distributed optimizer delegates gradient
computation to the original optimizer, averages gradients using allreduce or allgather,
and then applies those averaged gradients.
- Broadcast the initial variable states from rank 0 to all other processes.
Note:
Suggest setting exactly the same software environment for every node, especially `mpi`, `nccl`, etc.
A good practice is to use the same MONAI docker image for all nodes directly, if using docker, need
to set SSH permissions both at the node and in docker, referring to Horovod guide for more details:
https://github.com/horovod/horovod/blob/master/docs/docker.rst
Example script to execute this program, only need to run on the master node:
`horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"`
This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3], [horovod 0.19.5].
Referring to: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
"""
import argparse
import os
import sys
from glob import glob
import horovod.torch as hvd
import nibabel as nib
import numpy as np
import torch
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
import monai
from monai.data import DataLoader, Dataset, create_test_image_3d
from monai.transforms import (
AsChannelFirstd,
Compose,
LoadImaged,
RandCropByPosNegLabeld,
RandRotate90d,
ScaleIntensityd,
ToTensord,
)
import time
#torch.backends.cudnn.enabled = False
def train(args):
time_start=time.time()#measure the time
# initialize Horovod library
hvd.init()
# Horovod limits CPU threads to be used per worker
torch.set_num_threads(1)
# disable logging for processes except 0 on every node
if hvd.local_rank() != 0:
f = open(os.devnull, "w")
sys.stdout = sys.stderr = f
elif not os.path.exists(args.dir):
# create 40 random image, mask paris on master node for training
print(f"generating synthetic data to {args.dir} (this may take a while)")
os.makedirs(args.dir)
# set random seed to generate same random data for every node
np.random.seed(seed=0)
for i in range(40):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))
images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
train_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]
# define transforms for image and segmentation
train_transforms = Compose(
[
LoadImaged(keys=["img", "seg"]),
AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
ScaleIntensityd(keys="img"),
RandCropByPosNegLabeld(
keys=["img", "seg"], label_key="seg", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
),
RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
ToTensord(keys=["img", "seg"]),
]
)
# create a training data loader
train_ds = Dataset(data=train_files, transform=train_transforms)
# create a training data sampler
train_sampler = DistributedSampler(train_ds, num_replicas=hvd.size(), rank=hvd.rank())
# when supported, use "forkserver" to spawn dataloader workers instead of "fork" to prevent
# issues with Infiniband implementations that are not fork-safe
multiprocessing_context = None
if hasattr(mp, "_supports_context") and mp._supports_context and "forkserver" in mp.get_all_start_methods():
multiprocessing_context = "forkserver"
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
train_loader = DataLoader(
train_ds,
batch_size=2,
shuffle=False,
num_workers=2,
pin_memory=True,
sampler=train_sampler,
multiprocessing_context=multiprocessing_context,
)
# create UNet, DiceLoss and Adam optimizer
device = torch.device(f"cuda:{hvd.local_rank()}")
torch.cuda.set_device(device)
model = monai.networks.nets.UNet(
dimensions=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
loss_function = monai.losses.DiceLoss(sigmoid=True).to(device)
optimizer = torch.optim.Adam(model.parameters(), 1e-3)
# Horovod broadcasts parameters & optimizer state
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod wraps optimizer with DistributedOptimizer
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
# start a typical PyTorch training
epoch_loss_values = list()
EPOCH_SIZE=60
for epoch in range(EPOCH_SIZE):
print("-" * 10)
print(f"epoch {epoch + 1}/{EPOCH_SIZE}")
model.train()
epoch_loss = 0
step = 0
train_sampler.set_epoch(epoch)
for batch_data in train_loader:
step += 1
inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = len(train_ds) // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
print(f"train completed, epoch losses: {epoch_loss_values}")
if hvd.rank() == 0:
# all processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes,
# therefore, saving it in one process is sufficient
torch.save(model.state_dict(), "final_model.pth")
time_end=time.time()
print('totally time cost',time_end-time_start)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", default="./testdata", type=str, help="directory to create random data")
args = parser.parse_args()
train(args=args)
# Example script to execute this program only on the master node:
# horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"
if __name__ == "__main__":
main() | en | 0.748371 | # Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This example shows how to execute distributed training based on Horovod APIs. It can run on several nodes with multiple GPU devices on every node. Main steps to set up the distributed training: - Install Horovod referring to the guide: https://github.com/horovod/horovod/blob/master/docs/gpus.rst If using MONAI docker, which already has NCCL and MPI, can quickly install Horovod with command: `HOROVOD_NCCL_INCLUDE=/usr/include HOROVOD_NCCL_LIB=/usr/lib/x86_64-linux-gnu HOROVOD_GPU_OPERATIONS=NCCL \ pip install --no-cache-dir horovod` - Set SSH permissions for root login without password at all nodes except master, referring to: http://www.linuxproblem.org/art_9.html - Run `hvd.init()` to initialize Horovod. - Pin each GPU to a single process to avoid resource contention, use `hvd.local_rank()` to get GPU index. And use `hvd.rank()` to get the overall rank index. - Wrap Dataset with `DistributedSampler`, and disable the `shuffle` in DataLoader. Instead, shuffle data by `train_sampler.set_epoch(epoch)` before every epoch. - Wrap the optimizer in hvd.DistributedOptimizer. The distributed optimizer delegates gradient computation to the original optimizer, averages gradients using allreduce or allgather, and then applies those averaged gradients. - Broadcast the initial variable states from rank 0 to all other processes. Note: Suggest setting exactly the same software environment for every node, especially `mpi`, `nccl`, etc. A good practice is to use the same MONAI docker image for all nodes directly, if using docker, need to set SSH permissions both at the node and in docker, referring to Horovod guide for more details: https://github.com/horovod/horovod/blob/master/docs/docker.rst Example script to execute this program, only need to run on the master node: `horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata"` This example was tested with [Ubuntu 16.04/20.04], [NCCL 2.6.3], [horovod 0.19.5]. 
Referring to: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py #torch.backends.cudnn.enabled = False #measure the time # initialize Horovod library # Horovod limits CPU threads to be used per worker # disable logging for processes except 0 on every node # create 40 random image, mask paris on master node for training # set random seed to generate same random data for every node # define transforms for image and segmentation # create a training data loader # create a training data sampler # when supported, use "forkserver" to spawn dataloader workers instead of "fork" to prevent # issues with Infiniband implementations that are not fork-safe # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training # create UNet, DiceLoss and Adam optimizer # Horovod broadcasts parameters & optimizer state # Horovod wraps optimizer with DistributedOptimizer # start a typical PyTorch training # all processes should see same parameters as they all start from same # random parameters and gradients are synchronized in backward passes, # therefore, saving it in one process is sufficient # Example script to execute this program only on the master node: # horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python unet_training_horovod.py -d "./testdata" | 1.888156 | 2 |
oscar/lib/python2.7/site-packages/PIL/XpmImagePlugin.py | sainjusajan/django-oscar | 0 | 6632103 | #
# The Python Imaging Library.
# $Id$
#
# XPM File handling
#
# History:
# 1996-12-29 fl Created
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) <NAME> 1996-2001.
#
# See the README file for information on usage and redistribution.
#
import re
from . import Image, ImageFile, ImagePalette
from ._binary import i8, o8
__version__ = "0.2"
# XPM header
xpm_head = re.compile(b"\"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)")
def _accept(prefix):
return prefix[:9] == b"/* XPM */"
##
# Image plugin for X11 pixel maps.
class XpmImageFile(ImageFile.ImageFile):
format = "XPM"
format_description = "X11 Pixel Map"
def _open(self):
if not _accept(self.fp.read(9)):
raise SyntaxError("not an XPM file")
# skip forward to next string
while True:
s = self.fp.readline()
if not s:
raise SyntaxError("broken XPM file")
m = xpm_head.match(s)
if m:
break
self.size = int(m.group(1)), int(m.group(2))
pal = int(m.group(3))
bpp = int(m.group(4))
if pal > 256 or bpp != 1:
raise ValueError("cannot read this XPM file")
#
# load palette description
palette = [b"\0\0\0"] * 256
for i in range(pal):
s = self.fp.readline()
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] in b'\r\n':
s = s[:-1]
c = i8(s[1])
s = s[2:-2].split()
for i in range(0, len(s), 2):
if s[i] == b"c":
# process colour key
rgb = s[i+1]
if rgb == b"None":
self.info["transparency"] = c
elif rgb[0:1] == b"#":
# FIXME: handle colour names (see ImagePalette.py)
rgb = int(rgb[1:], 16)
palette[c] = (o8((rgb >> 16) & 255) +
o8((rgb >> 8) & 255) +
o8(rgb & 255))
else:
# unknown colour
raise ValueError("cannot read this XPM file")
break
else:
# missing colour key
raise ValueError("cannot read this XPM file")
self.mode = "P"
self.palette = ImagePalette.raw("RGB", b"".join(palette))
self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), ("P", 0, 1))]
def load_read(self, bytes):
#
# load all image data in one chunk
xsize, ysize = self.size
s = [None] * ysize
for i in range(ysize):
s[i] = self.fp.readline()[1:xsize+1].ljust(xsize)
return b"".join(s)
#
# Registry
Image.register_open(XpmImageFile.format, XpmImageFile, _accept)
Image.register_extension(XpmImageFile.format, ".xpm")
Image.register_mime(XpmImageFile.format, "image/xpm")
| #
# The Python Imaging Library.
# $Id$
#
# XPM File handling
#
# History:
# 1996-12-29 fl Created
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) <NAME> 1996-2001.
#
# See the README file for information on usage and redistribution.
#
import re
from . import Image, ImageFile, ImagePalette
from ._binary import i8, o8
__version__ = "0.2"
# XPM header
xpm_head = re.compile(b"\"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)")
def _accept(prefix):
return prefix[:9] == b"/* XPM */"
##
# Image plugin for X11 pixel maps.
class XpmImageFile(ImageFile.ImageFile):
format = "XPM"
format_description = "X11 Pixel Map"
def _open(self):
if not _accept(self.fp.read(9)):
raise SyntaxError("not an XPM file")
# skip forward to next string
while True:
s = self.fp.readline()
if not s:
raise SyntaxError("broken XPM file")
m = xpm_head.match(s)
if m:
break
self.size = int(m.group(1)), int(m.group(2))
pal = int(m.group(3))
bpp = int(m.group(4))
if pal > 256 or bpp != 1:
raise ValueError("cannot read this XPM file")
#
# load palette description
palette = [b"\0\0\0"] * 256
for i in range(pal):
s = self.fp.readline()
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] in b'\r\n':
s = s[:-1]
c = i8(s[1])
s = s[2:-2].split()
for i in range(0, len(s), 2):
if s[i] == b"c":
# process colour key
rgb = s[i+1]
if rgb == b"None":
self.info["transparency"] = c
elif rgb[0:1] == b"#":
# FIXME: handle colour names (see ImagePalette.py)
rgb = int(rgb[1:], 16)
palette[c] = (o8((rgb >> 16) & 255) +
o8((rgb >> 8) & 255) +
o8(rgb & 255))
else:
# unknown colour
raise ValueError("cannot read this XPM file")
break
else:
# missing colour key
raise ValueError("cannot read this XPM file")
self.mode = "P"
self.palette = ImagePalette.raw("RGB", b"".join(palette))
self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), ("P", 0, 1))]
def load_read(self, bytes):
#
# load all image data in one chunk
xsize, ysize = self.size
s = [None] * ysize
for i in range(ysize):
s[i] = self.fp.readline()[1:xsize+1].ljust(xsize)
return b"".join(s)
#
# Registry
Image.register_open(XpmImageFile.format, XpmImageFile, _accept)
Image.register_extension(XpmImageFile.format, ".xpm")
Image.register_mime(XpmImageFile.format, "image/xpm")
| en | 0.583133 | # # The Python Imaging Library. # $Id$ # # XPM File handling # # History: # 1996-12-29 fl Created # 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) # # Copyright (c) Secret Labs AB 1997-2001. # Copyright (c) <NAME> 1996-2001. # # See the README file for information on usage and redistribution. # # XPM header ## # Image plugin for X11 pixel maps. # skip forward to next string # # load palette description # process colour key # FIXME: handle colour names (see ImagePalette.py) # unknown colour # missing colour key # # load all image data in one chunk # # Registry | 2.53224 | 3 |
main_app/migrations/0014_auto_20211114_2331.py | barkev2009/eurovision-app | 1 | 6632104 | # Generated by Django 2.2.12 on 2021-11-14 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0013_auto_20211114_2326'),
]
operations = [
migrations.RemoveField(
model_name='entry',
name='score',
),
migrations.AddField(
model_name='entry',
name='difficulty',
field=models.BooleanField(default=False, verbose_name='Сложность исполнения песни'),
),
migrations.AddField(
model_name='entry',
name='originality',
field=models.BooleanField(default=False, verbose_name='Оригинальность исполнения песни'),
),
migrations.AddField(
model_name='entry',
name='purity',
field=models.BooleanField(default=False, verbose_name='Чистота исполнения песни'),
),
migrations.AddField(
model_name='entry',
name='show',
field=models.BooleanField(default=False, verbose_name='Наличие шоу в выступлении'),
),
migrations.AddField(
model_name='entry',
name='sympathy',
field=models.BooleanField(default=False, verbose_name='Личная симпатия'),
),
migrations.DeleteModel(
name='Star',
),
]
| # Generated by Django 2.2.12 on 2021-11-14 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0013_auto_20211114_2326'),
]
operations = [
migrations.RemoveField(
model_name='entry',
name='score',
),
migrations.AddField(
model_name='entry',
name='difficulty',
field=models.BooleanField(default=False, verbose_name='Сложность исполнения песни'),
),
migrations.AddField(
model_name='entry',
name='originality',
field=models.BooleanField(default=False, verbose_name='Оригинальность исполнения песни'),
),
migrations.AddField(
model_name='entry',
name='purity',
field=models.BooleanField(default=False, verbose_name='Чистота исполнения песни'),
),
migrations.AddField(
model_name='entry',
name='show',
field=models.BooleanField(default=False, verbose_name='Наличие шоу в выступлении'),
),
migrations.AddField(
model_name='entry',
name='sympathy',
field=models.BooleanField(default=False, verbose_name='Личная симпатия'),
),
migrations.DeleteModel(
name='Star',
),
]
| en | 0.774066 | # Generated by Django 2.2.12 on 2021-11-14 20:31 | 1.75183 | 2 |
chainer/training/triggers/minmax_value_trigger.py | Evanc123/chainer | 0 | 6632105 | from chainer import reporter
from chainer.training import util
class BestValueTrigger(object):
"""Trigger invoked when specific value becomes best.
Args:
key (str): Key of value.
compare (callable): Compare function which takes current best value and
new value and returns whether new value is better than current
best.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, compare, trigger=(1, 'epoch')):
self._key = key
self._best_value = None
self._interval_trigger = util.get_trigger(trigger)
self._init_summary()
self._compare = compare
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (~chainer.training.Trainer): Trainer object that this
trigger is associated with. The ``observation`` of this trainer
is used to determine if the trigger should fire.
Returns:
bool: ``True`` if the corresponding extension should be invoked in
this iteration.
"""
observation = trainer.observation
summary = self._summary
key = self._key
if key in observation:
summary.add({key: observation[key]})
if not self._interval_trigger(trainer):
return False
stats = summary.compute_mean()
value = float(stats[key]) # copy to CPU
self._init_summary()
if self._best_value is None or self._compare(self._best_value, value):
self._best_value = value
return True
return False
def _init_summary(self):
self._summary = reporter.DictSummary()
class MaxValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes maximum.
For example you can use this trigger to take snapshot on the epoch the
validation accuracy is maximum.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes maximum.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, trigger=(1, 'epoch')):
super(MaxValueTrigger, self).__init__(
key, lambda max_value, new_value: new_value > max_value, trigger)
class MinValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes minimum.
For example you can use this trigger to take snapshot on the epoch the
validation loss is minimum.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes minimum.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, trigger=(1, 'epoch')):
super(MinValueTrigger, self).__init__(
key, lambda min_value, new_value: new_value < min_value, trigger)
| from chainer import reporter
from chainer.training import util
class BestValueTrigger(object):
"""Trigger invoked when specific value becomes best.
Args:
key (str): Key of value.
compare (callable): Compare function which takes current best value and
new value and returns whether new value is better than current
best.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, compare, trigger=(1, 'epoch')):
self._key = key
self._best_value = None
self._interval_trigger = util.get_trigger(trigger)
self._init_summary()
self._compare = compare
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (~chainer.training.Trainer): Trainer object that this
trigger is associated with. The ``observation`` of this trainer
is used to determine if the trigger should fire.
Returns:
bool: ``True`` if the corresponding extension should be invoked in
this iteration.
"""
observation = trainer.observation
summary = self._summary
key = self._key
if key in observation:
summary.add({key: observation[key]})
if not self._interval_trigger(trainer):
return False
stats = summary.compute_mean()
value = float(stats[key]) # copy to CPU
self._init_summary()
if self._best_value is None or self._compare(self._best_value, value):
self._best_value = value
return True
return False
def _init_summary(self):
self._summary = reporter.DictSummary()
class MaxValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes maximum.
For example you can use this trigger to take snapshot on the epoch the
validation accuracy is maximum.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes maximum.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, trigger=(1, 'epoch')):
super(MaxValueTrigger, self).__init__(
key, lambda max_value, new_value: new_value > max_value, trigger)
class MinValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes minimum.
For example you can use this trigger to take snapshot on the epoch the
validation loss is minimum.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes minimum.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, trigger=(1, 'epoch')):
super(MinValueTrigger, self).__init__(
key, lambda min_value, new_value: new_value < min_value, trigger)
| en | 0.806624 | Trigger invoked when specific value becomes best. Args: key (str): Key of value. compare (callable): Compare function which takes current best value and new value and returns whether new value is better than current best. trigger: Trigger that decides the comparison interval between current best value and new value. This must be a tuple in the form of ``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to :class:`~chainer.training.triggers.IntervalTrigger`. Decides whether the extension should be called on this iteration. Args: trainer (~chainer.training.Trainer): Trainer object that this trigger is associated with. The ``observation`` of this trainer is used to determine if the trigger should fire. Returns: bool: ``True`` if the corresponding extension should be invoked in this iteration. # copy to CPU Trigger invoked when specific value becomes maximum. For example you can use this trigger to take snapshot on the epoch the validation accuracy is maximum. Args: key (str): Key of value. The trigger fires when the value associated with this key becomes maximum. trigger: Trigger that decides the comparison interval between current best value and new value. This must be a tuple in the form of ``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to :class:`~chainer.training.triggers.IntervalTrigger`. Trigger invoked when specific value becomes minimum. For example you can use this trigger to take snapshot on the epoch the validation loss is minimum. Args: key (str): Key of value. The trigger fires when the value associated with this key becomes minimum. trigger: Trigger that decides the comparison interval between current best value and new value. This must be a tuple in the form of ``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to :class:`~chainer.training.triggers.IntervalTrigger`. | 2.847794 | 3 |
bin/hist.py | CoderVikas/mindotfiles | 0 | 6632106 | #!/usr/bin/env python
# x.startswith("_")
f = open('/Users/yadvika/.bash_history')
l = f.readlines()
l.reverse()
short = []
cmdline_included = 0
for s in l:
if s.startswith("#"):
if cmdline_included == 1:
short.append(s.rstrip())
continue
# marked not-include unless we really add it to short
cmdline_included = 0
if s.rstrip() not in short:
cmdline_included = 1
short.append(s.rstrip())
short.reverse()
for s in short:
print s
| #!/usr/bin/env python
# x.startswith("_")
f = open('/Users/yadvika/.bash_history')
l = f.readlines()
l.reverse()
short = []
cmdline_included = 0
for s in l:
if s.startswith("#"):
if cmdline_included == 1:
short.append(s.rstrip())
continue
# marked not-include unless we really add it to short
cmdline_included = 0
if s.rstrip() not in short:
cmdline_included = 1
short.append(s.rstrip())
short.reverse()
for s in short:
print s
| en | 0.749135 | #!/usr/bin/env python # x.startswith("_") # marked not-include unless we really add it to short | 2.706033 | 3 |
tcevent.py | metocean/tcrm | 0 | 6632107 | <filename>tcevent.py
"""
:mod:`tcevent` -- run the windfield module for a single TC track
================================================================
.. module:: tcevent
:synopsis: Run the wind field module for a single TC track.
Run the :mod:`wind.windmodels` module to calculate the wind field for a
single TC event. The track of the TC is interpolated to a fine
temporal resolution, then the maximum wind field evaluated.
Data at selected points within the model domain can be extracted
at each time step, giving a time history of wind speed, direction
and estimated sea level pressure for the location(s).
See the :ref:`Scenario modelling <scenariomodelling>` section of
the TCRM User Guide for details on running this script.
"""
import logging as log
log.getLogger('matplotlib').setLevel(log.WARNING)
from functools import reduce
import os
import time
import argparse
import traceback
from functools import wraps
from os.path import join as pjoin, realpath, isdir, dirname
from Utilities import pathLocator
from Utilities.config import ConfigParser
from Utilities.files import flStartLog
from Utilities.version import version
from Utilities.progressbar import SimpleProgressBar as ProgressBar
from Evaluate import interpolateTracks
__version__ = version()
def timer(f):
"""
Basic timing functions for entire process
"""
@wraps(f)
def wrap(*args, **kwargs):
t1 = time.time()
res = f(*args, **kwargs)
tottime = time.time() - t1
msg = "%02d:%02d:%02d " % \
reduce(lambda ll, b : divmod(ll[0], b) + ll[1:],
[(tottime,), 60, 60])
log.info("Time for {0}: {1}".format(f.__name__, msg) )
return res
return wrap
def doOutputDirectoryCreation(configFile):
"""
Create all the necessary output folders.
:param str configFile: Name of configuration file.
:raises OSError: If the directory tree cannot be created.
"""
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
log.info('Output will be stored under %s', outputPath)
subdirs = ['tracks', 'windfield', 'plots', 'plots/timeseries',
'log', 'process', 'process/timeseries']
if not isdir(outputPath):
try:
os.makedirs(outputPath)
except OSError:
raise
for subdir in subdirs:
if not isdir(realpath(pjoin(outputPath, subdir))):
try:
os.makedirs(realpath(pjoin(outputPath, subdir)))
except OSError:
raise
def doTimeseriesPlotting(configFile):
"""
Run functions to plot time series output.
:param str configFile: Path to configuration file.
"""
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
timeseriesPath = pjoin(outputPath, 'process', 'timeseries')
plotPath = pjoin(outputPath, 'plots', 'timeseries')
log.info("Plotting time series data to {0}".format(plotPath))
from PlotInterface.plotTimeseries import plotTimeseries
plotTimeseries(timeseriesPath, plotPath)
def doWindfieldPlotting(configFile):
"""
Plot the wind field on a map.
:param str configFile: Path to the configuration file.
:Note: the file name is assumed to be 'gust.interp.nc'
"""
from netCDF4 import Dataset
import numpy as np
from PlotInterface.maps import saveWindfieldMap
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
windfieldPath = pjoin(outputPath, 'windfield')
inputFile = config.get('DataProcess', 'InputFile')
if inputFile.endswith(".nc"):
# We have a netcdf track file. Work under the assumption it is
# drawn directly from TCRM.
trackFile = os.path.basename(inputFile)
trackId = trackFile.split('.')[1]
gustFile = 'gust.{0}.nc'.format(trackId)
outputWindFile = pjoin(windfieldPath, gustFile)
else:
# Note the assumption about the file name!
outputWindFile = pjoin(windfieldPath, 'gust.001-00001.nc')
plotPath = pjoin(outputPath, 'plots', 'maxwind.png')
f = Dataset(outputWindFile, 'r')
xdata = f.variables['lon'][:]
ydata = f.variables['lat'][:]
vdata = f.variables['vmax'][:]
gridLimit = None
if config.has_option('Region','gridLimit'):
gridLimit = config.geteval('Region', 'gridLimit')
ii = np.where((xdata >= gridLimit['xMin']) &
(xdata <= gridLimit['xMax']))
jj = np.where((ydata >= gridLimit['yMin']) &
(ydata <= gridLimit['yMax']))
[xgrid, ygrid] = np.meshgrid(xdata[ii], ydata[jj])
ig, jg = np.meshgrid(ii, jj)
vdata = vdata[jg, ig]
else:
[xgrid, ygrid] = np.meshgrid(xdata, ydata)
map_kwargs = dict(llcrnrlon=xgrid.min(),
llcrnrlat=ygrid.min(),
urcrnrlon=xgrid.max(),
urcrnrlat=ygrid.max(),
projection='merc',
resolution='i')
title = "Maximum wind speed"
cbarlabel = "Wind speed ({0})".format(f.variables['vmax'].units)
levels = np.arange(30, 101., 5.)
saveWindfieldMap(vdata, xgrid, ygrid, title, levels,
cbarlabel, map_kwargs, plotPath)
@timer
def main(configFile):
"""
Main function to execute the :mod:`wind`.
:param str configFile: Path to configuration file.
"""
config = ConfigParser()
config.read(configFile)
doOutputDirectoryCreation(configFile)
trackFile = config.get('DataProcess', 'InputFile')
source = config.get('DataProcess', 'Source')
delta = 1/12.
outputPath = pjoin(config.get('Output','Path'), 'tracks')
outputTrackFile = pjoin(outputPath, "tracks.interp.nc")
# This will save interpolated track data in TCRM format:
interpTrack = interpolateTracks.parseTracks(configFile, trackFile,
source, delta,
outputTrackFile,
interpolation_type='akima')
showProgressBar = config.get('Logging', 'ProgressBar')
pbar = ProgressBar('Calculating wind fields: ', showProgressBar)
def status(done, total):
pbar.update(float(done)/total)
import wind
wind.run(configFile, status)
import impact
impact.run_optional(config)
if config.getboolean('WindfieldInterface', 'PlotOutput'):
doWindfieldPlotting(configFile)
if config.getboolean('Timeseries', 'Extract'):
doTimeseriesPlotting(configFile)
def startup():
"""
Parse the command line arguments and call the :func:`main`
function.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file',
help='Path to configuration file')
parser.add_argument('-v', '--verbose', help='Verbose output',
action='store_true')
parser.add_argument('-d', '--debug', help='Allow pdb traces',
action='store_true')
args = parser.parse_args()
configFile = args.config_file
config = ConfigParser()
config.read(configFile)
rootdir = pathLocator.getRootDirectory()
os.chdir(rootdir)
logfile = config.get('Logging','LogFile')
logdir = dirname(realpath(logfile))
# If log file directory does not exist, create it
if not isdir(logdir):
try:
os.makedirs(logdir)
except OSError:
logfile = pjoin(os.getcwd(), 'tcrm.log')
logLevel = config.get('Logging', 'LogLevel')
verbose = config.getboolean('Logging', 'Verbose')
datestamp = config.getboolean('Logging', 'Datestamp')
debug = False
if args.verbose:
verbose = True
if args.debug:
debug = True
flStartLog(logfile, logLevel, verbose, datestamp)
# Switch off minor warning messages
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
warnings.filterwarnings("ignore", category=UserWarning,
module="matplotlib")
warnings.filterwarnings("ignore", category=RuntimeWarning)
if debug:
main(configFile)
else:
try:
main(configFile)
except ImportError as e:
log.critical("Missing module: {0}".format(e))
except Exception: # pylint: disable=W0703
# Catch any exceptions that occur and log them (nicely):
tblines = traceback.format_exc().splitlines()
for line in tblines:
log.critical(line.lstrip())
if __name__ == "__main__":
startup()
| <filename>tcevent.py
"""
:mod:`tcevent` -- run the windfield module for a single TC track
================================================================
.. module:: tcevent
:synopsis: Run the wind field module for a single TC track.
Run the :mod:`wind.windmodels` module to calculate the wind field for a
single TC event. The track of the TC is interpolated to a fine
temporal resolution, then the maximum wind field evaluated.
Data at selected points within the model domain can be extracted
at each time step, giving a time history of wind speed, direction
and estimated sea level pressure for the location(s).
See the :ref:`Scenario modelling <scenariomodelling>` section of
the TCRM User Guide for details on running this script.
"""
import logging as log
log.getLogger('matplotlib').setLevel(log.WARNING)
from functools import reduce
import os
import time
import argparse
import traceback
from functools import wraps
from os.path import join as pjoin, realpath, isdir, dirname
from Utilities import pathLocator
from Utilities.config import ConfigParser
from Utilities.files import flStartLog
from Utilities.version import version
from Utilities.progressbar import SimpleProgressBar as ProgressBar
from Evaluate import interpolateTracks
__version__ = version()
def timer(f):
"""
Basic timing functions for entire process
"""
@wraps(f)
def wrap(*args, **kwargs):
t1 = time.time()
res = f(*args, **kwargs)
tottime = time.time() - t1
msg = "%02d:%02d:%02d " % \
reduce(lambda ll, b : divmod(ll[0], b) + ll[1:],
[(tottime,), 60, 60])
log.info("Time for {0}: {1}".format(f.__name__, msg) )
return res
return wrap
def doOutputDirectoryCreation(configFile):
"""
Create all the necessary output folders.
:param str configFile: Name of configuration file.
:raises OSError: If the directory tree cannot be created.
"""
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
log.info('Output will be stored under %s', outputPath)
subdirs = ['tracks', 'windfield', 'plots', 'plots/timeseries',
'log', 'process', 'process/timeseries']
if not isdir(outputPath):
try:
os.makedirs(outputPath)
except OSError:
raise
for subdir in subdirs:
if not isdir(realpath(pjoin(outputPath, subdir))):
try:
os.makedirs(realpath(pjoin(outputPath, subdir)))
except OSError:
raise
def doTimeseriesPlotting(configFile):
"""
Run functions to plot time series output.
:param str configFile: Path to configuration file.
"""
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
timeseriesPath = pjoin(outputPath, 'process', 'timeseries')
plotPath = pjoin(outputPath, 'plots', 'timeseries')
log.info("Plotting time series data to {0}".format(plotPath))
from PlotInterface.plotTimeseries import plotTimeseries
plotTimeseries(timeseriesPath, plotPath)
def doWindfieldPlotting(configFile):
"""
Plot the wind field on a map.
:param str configFile: Path to the configuration file.
:Note: the file name is assumed to be 'gust.interp.nc'
"""
from netCDF4 import Dataset
import numpy as np
from PlotInterface.maps import saveWindfieldMap
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
windfieldPath = pjoin(outputPath, 'windfield')
inputFile = config.get('DataProcess', 'InputFile')
if inputFile.endswith(".nc"):
# We have a netcdf track file. Work under the assumption it is
# drawn directly from TCRM.
trackFile = os.path.basename(inputFile)
trackId = trackFile.split('.')[1]
gustFile = 'gust.{0}.nc'.format(trackId)
outputWindFile = pjoin(windfieldPath, gustFile)
else:
# Note the assumption about the file name!
outputWindFile = pjoin(windfieldPath, 'gust.001-00001.nc')
plotPath = pjoin(outputPath, 'plots', 'maxwind.png')
f = Dataset(outputWindFile, 'r')
xdata = f.variables['lon'][:]
ydata = f.variables['lat'][:]
vdata = f.variables['vmax'][:]
gridLimit = None
if config.has_option('Region','gridLimit'):
gridLimit = config.geteval('Region', 'gridLimit')
ii = np.where((xdata >= gridLimit['xMin']) &
(xdata <= gridLimit['xMax']))
jj = np.where((ydata >= gridLimit['yMin']) &
(ydata <= gridLimit['yMax']))
[xgrid, ygrid] = np.meshgrid(xdata[ii], ydata[jj])
ig, jg = np.meshgrid(ii, jj)
vdata = vdata[jg, ig]
else:
[xgrid, ygrid] = np.meshgrid(xdata, ydata)
map_kwargs = dict(llcrnrlon=xgrid.min(),
llcrnrlat=ygrid.min(),
urcrnrlon=xgrid.max(),
urcrnrlat=ygrid.max(),
projection='merc',
resolution='i')
title = "Maximum wind speed"
cbarlabel = "Wind speed ({0})".format(f.variables['vmax'].units)
levels = np.arange(30, 101., 5.)
saveWindfieldMap(vdata, xgrid, ygrid, title, levels,
cbarlabel, map_kwargs, plotPath)
@timer
def main(configFile):
"""
Main function to execute the :mod:`wind`.
:param str configFile: Path to configuration file.
"""
config = ConfigParser()
config.read(configFile)
doOutputDirectoryCreation(configFile)
trackFile = config.get('DataProcess', 'InputFile')
source = config.get('DataProcess', 'Source')
delta = 1/12.
outputPath = pjoin(config.get('Output','Path'), 'tracks')
outputTrackFile = pjoin(outputPath, "tracks.interp.nc")
# This will save interpolated track data in TCRM format:
interpTrack = interpolateTracks.parseTracks(configFile, trackFile,
source, delta,
outputTrackFile,
interpolation_type='akima')
showProgressBar = config.get('Logging', 'ProgressBar')
pbar = ProgressBar('Calculating wind fields: ', showProgressBar)
def status(done, total):
pbar.update(float(done)/total)
import wind
wind.run(configFile, status)
import impact
impact.run_optional(config)
if config.getboolean('WindfieldInterface', 'PlotOutput'):
doWindfieldPlotting(configFile)
if config.getboolean('Timeseries', 'Extract'):
doTimeseriesPlotting(configFile)
def startup():
"""
Parse the command line arguments and call the :func:`main`
function.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file',
help='Path to configuration file')
parser.add_argument('-v', '--verbose', help='Verbose output',
action='store_true')
parser.add_argument('-d', '--debug', help='Allow pdb traces',
action='store_true')
args = parser.parse_args()
configFile = args.config_file
config = ConfigParser()
config.read(configFile)
rootdir = pathLocator.getRootDirectory()
os.chdir(rootdir)
logfile = config.get('Logging','LogFile')
logdir = dirname(realpath(logfile))
# If log file directory does not exist, create it
if not isdir(logdir):
try:
os.makedirs(logdir)
except OSError:
logfile = pjoin(os.getcwd(), 'tcrm.log')
logLevel = config.get('Logging', 'LogLevel')
verbose = config.getboolean('Logging', 'Verbose')
datestamp = config.getboolean('Logging', 'Datestamp')
debug = False
if args.verbose:
verbose = True
if args.debug:
debug = True
flStartLog(logfile, logLevel, verbose, datestamp)
# Switch off minor warning messages
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
warnings.filterwarnings("ignore", category=UserWarning,
module="matplotlib")
warnings.filterwarnings("ignore", category=RuntimeWarning)
if debug:
main(configFile)
else:
try:
main(configFile)
except ImportError as e:
log.critical("Missing module: {0}".format(e))
except Exception: # pylint: disable=W0703
# Catch any exceptions that occur and log them (nicely):
tblines = traceback.format_exc().splitlines()
for line in tblines:
log.critical(line.lstrip())
if __name__ == "__main__":
startup()
| en | 0.708444 | :mod:`tcevent` -- run the windfield module for a single TC track ================================================================ .. module:: tcevent :synopsis: Run the wind field module for a single TC track. Run the :mod:`wind.windmodels` module to calculate the wind field for a single TC event. The track of the TC is interpolated to a fine temporal resolution, then the maximum wind field evaluated. Data at selected points within the model domain can be extracted at each time step, giving a time history of wind speed, direction and estimated sea level pressure for the location(s). See the :ref:`Scenario modelling <scenariomodelling>` section of the TCRM User Guide for details on running this script. Basic timing functions for entire process Create all the necessary output folders. :param str configFile: Name of configuration file. :raises OSError: If the directory tree cannot be created. Run functions to plot time series output. :param str configFile: Path to configuration file. Plot the wind field on a map. :param str configFile: Path to the configuration file. :Note: the file name is assumed to be 'gust.interp.nc' # We have a netcdf track file. Work under the assumption it is # drawn directly from TCRM. # Note the assumption about the file name! Main function to execute the :mod:`wind`. :param str configFile: Path to configuration file. # This will save interpolated track data in TCRM format: Parse the command line arguments and call the :func:`main` function. # If log file directory does not exist, create it # Switch off minor warning messages # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): | 2.581006 | 3 |
example/python/tx-example.py | laSinteZ/iroha | 1 | 6632108 | from __future__ import print_function
import sys
# import iroha library from nested folder
sys.path.insert(0, 'build/shared_model/bindings')
import iroha
import time
import block_pb2
import endpoint_pb2
import endpoint_pb2_grpc
import queries_pb2
import grpc
tx_builder = iroha.ModelTransactionBuilder()
query_builder = iroha.ModelQueryBuilder()
crypto = iroha.ModelCrypto()
proto_tx_helper = iroha.ModelProtoTransaction()
proto_query_helper = iroha.ModelProtoQuery()
admin_priv = open("../<EMAIL>", "r").read()
admin_pub = open("../<EMAIL>", "r").read()
me_kp = crypto.convertFromExisting(admin_pub, admin_priv)
current_time = int(round(time.time() * 1000)) - 10**5
start_tx_counter = 1
start_query_counter = 1
creator = "admin@test"
# build transaction
tx = tx_builder.creatorAccountId(creator) \
.txCounter(start_tx_counter) \
.createdTime(current_time) \
.createDomain("ru", "user") \
.createAsset("dollar", "ru", 2).build()
tx_blob = proto_tx_helper.signAndAddSignature(tx, me_kp).blob()
# create proto object and send to iroha
proto_tx = block_pb2.Transaction()
proto_tx.ParseFromString(''.join(map(chr, tx_blob)))
channel = grpc.insecure_channel('127.0.0.1:50051')
stub = endpoint_pb2_grpc.CommandServiceStub(channel)
stub.Torii(proto_tx)
time.sleep(5)
# create status request
print("Hash of the transaction: ", tx.hash().hex())
tx_hash = tx.hash().blob()
tx_hash = ''.join(map(chr, tx_hash))
request = endpoint_pb2.TxStatusRequest()
request.tx_hash = tx_hash
response = stub.Status(request)
status = endpoint_pb2.TxStatus.Name(response.tx_status)
print("Status of transaction is:", status)
if status != "COMMITTED":
print("Your transaction wasn't committed")
exit(1)
query = query_builder.creatorAccountId(creator) \
.createdTime(current_time) \
.queryCounter(start_query_counter) \
.getAssetInfo("dollar#ru") \
.build()
query_blob = proto_query_helper.signAndAddSignature(query, me_kp).blob()
proto_query = queries_pb2.Query()
proto_query.ParseFromString(''.join(map(chr, query_blob)))
query_stub = endpoint_pb2_grpc.QueryServiceStub(channel)
query_response = query_stub.Find(proto_query)
if not query_response.HasField("asset_response"):
print("Query response error")
exit(1)
else:
print("Query responded with asset response")
asset_info = query_response.asset_response.asset
print("Asset Id =", asset_info.asset_id)
print("Precision =", asset_info.precision)
print("done!")
| from __future__ import print_function
import sys
# import iroha library from nested folder
sys.path.insert(0, 'build/shared_model/bindings')
import iroha
import time
import block_pb2
import endpoint_pb2
import endpoint_pb2_grpc
import queries_pb2
import grpc
tx_builder = iroha.ModelTransactionBuilder()
query_builder = iroha.ModelQueryBuilder()
crypto = iroha.ModelCrypto()
proto_tx_helper = iroha.ModelProtoTransaction()
proto_query_helper = iroha.ModelProtoQuery()
admin_priv = open("../<EMAIL>", "r").read()
admin_pub = open("../<EMAIL>", "r").read()
me_kp = crypto.convertFromExisting(admin_pub, admin_priv)
current_time = int(round(time.time() * 1000)) - 10**5
start_tx_counter = 1
start_query_counter = 1
creator = "admin@test"
# build transaction
tx = tx_builder.creatorAccountId(creator) \
.txCounter(start_tx_counter) \
.createdTime(current_time) \
.createDomain("ru", "user") \
.createAsset("dollar", "ru", 2).build()
tx_blob = proto_tx_helper.signAndAddSignature(tx, me_kp).blob()
# create proto object and send to iroha
proto_tx = block_pb2.Transaction()
proto_tx.ParseFromString(''.join(map(chr, tx_blob)))
channel = grpc.insecure_channel('127.0.0.1:50051')
stub = endpoint_pb2_grpc.CommandServiceStub(channel)
stub.Torii(proto_tx)
time.sleep(5)
# create status request
print("Hash of the transaction: ", tx.hash().hex())
tx_hash = tx.hash().blob()
tx_hash = ''.join(map(chr, tx_hash))
request = endpoint_pb2.TxStatusRequest()
request.tx_hash = tx_hash
response = stub.Status(request)
status = endpoint_pb2.TxStatus.Name(response.tx_status)
print("Status of transaction is:", status)
if status != "COMMITTED":
print("Your transaction wasn't committed")
exit(1)
query = query_builder.creatorAccountId(creator) \
.createdTime(current_time) \
.queryCounter(start_query_counter) \
.getAssetInfo("dollar#ru") \
.build()
query_blob = proto_query_helper.signAndAddSignature(query, me_kp).blob()
proto_query = queries_pb2.Query()
proto_query.ParseFromString(''.join(map(chr, query_blob)))
query_stub = endpoint_pb2_grpc.QueryServiceStub(channel)
query_response = query_stub.Find(proto_query)
if not query_response.HasField("asset_response"):
print("Query response error")
exit(1)
else:
print("Query responded with asset response")
asset_info = query_response.asset_response.asset
print("Asset Id =", asset_info.asset_id)
print("Precision =", asset_info.precision)
print("done!")
| en | 0.603712 | # import iroha library from nested folder # build transaction # create proto object and send to iroha # create status request #ru") \ | 2.054396 | 2 |
os_migrate/plugins/module_utils/workload_common.py | rbrady/os-migrate | 0 | 6632109 | <gh_stars>0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import logging
import os
import subprocess
import time
# Default timeout for OpenStack operations
DEFAULT_TIMEOUT = 600
# Lock to serialize volume attachments. This helps prevent device path
# mismatches between the OpenStack SDK and /dev in the VM.
ATTACH_LOCK_FILE_SOURCE = '/var/lock/v2v-source-volume-lock'
ATTACH_LOCK_FILE_DESTINATION = '/var/lock/v2v-destination-volume-lock'
# File containing ports used by all the nbdkit processes running on the source
# conversion host. There is a check to see if the port is available, but this
# should speed things up.
PORT_MAP_FILE = '/var/run/v2v-migration-ports'
PORT_LOCK_FILE = '/var/lock/v2v-migration-lock' # Lock for the port map
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'r+')
def use_lock(lock_file):
""" Boilerplate for functions that need to take a lock. """
def _decorate_lock(function):
def wait_for_lock(self):
for second in range(DEFAULT_TIMEOUT):
try:
self.log.info('Waiting for lock %s...', lock_file)
lock = lock_file + '.lock'
cmd = ['sudo', 'flock', '--timeout', '1',
'--conflict-exit-code', '16', lock_file, '-c',
'"( test ! -e ' + lock + ' || exit 17 ) ' +
'&& touch ' + lock + '"']
result = self.shell.cmd_val(cmd)
if result == 16:
self.log.info('Another conversion has the lock.')
elif result == 17:
self.log.info('Another conversion is holding the lock.')
elif result == 0:
break
except subprocess.CalledProcessError as err:
self.log.info('Error waiting for lock: %s', str(err))
time.sleep(1)
else:
raise RuntimeError('Unable to acquire lock ' + lock_file)
try:
return function(self)
finally:
try:
lock = lock_file + '.lock'
result = self.shell.cmd_out(['sudo', 'rm', '-f', lock])
self.log.debug('Released lock: %s', result)
except subprocess.CalledProcessError as err:
self.log.error('Error releasing lock: %s', str(err))
return wait_for_lock
return _decorate_lock
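# use_lock is applied below (as @use_lock(PORT_LOCK_FILE)) to _find_free_port and
# _release_ports, so concurrent migrations serialize their updates to the port map.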
class OpenStackHostBase():
def __init__(self, openstack_connection, conversion_host_id, ssh_key_path,
ssh_user, transfer_uuid, conversion_host_address=None,
state_file=None, log_file=None):
# Required common parameters:
# openstack_connection: OpenStack connection object
# conversion_host_id: ID of conversion host instance
# ssh_key_path: Path to SSH key authorized on conversion host
# ssh_user: Username to create the SSH connection
# transfer_uuid: UUID to mark processes on conversion hosts
self.conn = openstack_connection
self.conversion_host_id = conversion_host_id
self.ssh_key_path = ssh_key_path
self.ssh_user = ssh_user
self.transfer_uuid = transfer_uuid
# Optional parameters:
# conversion_host_address: Optional address used to override 'accessIPv4'
# state_file: File to hold current disk transfer state
# log_file: Debug log path for workload migration
self.conversion_host_address = conversion_host_address
self.state_file = state_file
self.log_file = log_file
# Configure logging
self.log = logging.getLogger('osp-osp')
log_format = logging.Formatter('%(asctime)s:%(levelname)s: ' +
'%(message)s (%(module)s:%(lineno)d)')
if log_file:
log_handler = logging.FileHandler(log_file)
else:
log_handler = logging.NullHandler()
log_handler.setFormatter(log_format)
self.log.addHandler(log_handler)
self.log.setLevel(logging.DEBUG)
if self._converter() is None:
raise RuntimeError('Cannot find instance {0}'.format(
self.conversion_host_id))
self.shell = RemoteShell(self._converter_address(), ssh_user, ssh_key_path)
self.shell.test_ssh_connection()
# Ports chosen for NBD export
self.claimed_ports = []
def _converter(self):
""" Refresh server object to pick up any changes. """
return self.conn.get_server_by_id(self.conversion_host_id)
def _converter_address(self):
""" Get IP address of conversion host. """
if self.conversion_host_address:
return self.conversion_host_address
else:
return self._converter().accessIPv4
def _update_progress(self, dev_path, progress):
self.log.info('Transfer progress for %s: %s%%', dev_path, str(progress))
if self.state_file is None:
return
self.volume_map[dev_path]['progress'] = progress
with open(self.state_file, 'w') as state:
all_progress = {}
for path, mapping in self.volume_map.items():
all_progress[path] = mapping['progress']
json.dump(all_progress, state)
def _attach_volumes(self, conn, name, funcs):
"""
Attach all volumes in the volume map to the specified conversion host.
Check the list of disks before and after attaching to be absolutely
sure the right source data gets copied to the right destination disk.
This is here because _attach_destination_volumes and
_attach_volumes_to_converter looked almost identical.
"""
self.log.info('Attaching volumes to %s wrapper', name)
host_func, ssh_func, update_func, volume_id_func = funcs
for path, mapping in sorted(self.volume_map.items()):
volume_id = volume_id_func(mapping)
volume = conn.get_volume_by_id(volume_id)
self.log.info('Attaching %s to %s conversion host', volume_id, name)
disks_before = ssh_func(['lsblk', '--noheadings', '--list',
'--paths', '--nodeps', '--output NAME'])
disks_before = set(disks_before.split())
self.log.debug('Initial disk list: %s', disks_before)
conn.attach_volume(volume=volume, wait=True, server=host_func(),
timeout=DEFAULT_TIMEOUT)
self.log.info('Waiting for volume to appear in %s wrapper', name)
self._wait_for_volume_dev_path(conn, volume, host_func(),
DEFAULT_TIMEOUT)
disks_after = ssh_func(['lsblk', '--noheadings', '--list',
'--paths', '--nodeps', '--output NAME'])
disks_after = set(disks_after.split())
self.log.debug('Updated disk list: %s', disks_after)
new_disks = disks_after - disks_before
volume = conn.get_volume_by_id(volume_id)
attachment = self._get_attachment(volume, host_func())
dev_path = attachment.device
if len(new_disks) == 1:
if dev_path in new_disks:
self.log.debug('Successfully attached new disk %s, and %s '
'conversion host path matches OpenStack.',
dev_path, name)
else:
dev_path = new_disks.pop()
self.log.debug('Successfully attached new disk %s, but %s '
'conversion host path does not match the '
'result from OpenStack. Using internal '
'device path %s.', attachment.device,
name, dev_path)
else:
raise RuntimeError('Got unexpected disk list after attaching '
'volume to {0} conversion host instance. '
'Failing migration procedure to avoid '
'assigning volumes incorrectly. New '
                                   'disk(s) inside VM: {1}, disk provided by '
'OpenStack: {2}'.format(name, new_disks,
dev_path))
self.volume_map[path] = update_func(mapping, dev_path)
def _get_attachment(self, volume, vm):
"""
Get the attachment object from the volume with the matching server ID.
Convenience method for use only when the attachment is already certain.
"""
for attachment in volume.attachments:
if attachment.server_id == vm.id:
return attachment
raise RuntimeError('Volume is not attached to the specified instance!')
def _wait_for_volume_dev_path(self, conn, volume, vm, timeout):
volume_id = volume.id
for second in range(timeout):
volume = conn.get_volume_by_id(volume_id)
if volume.attachments:
attachment = self._get_attachment(volume, vm)
if attachment.device.startswith('/dev/'):
return
time.sleep(1)
raise RuntimeError('Timed out waiting for volume device path!')
def __read_used_ports(self):
"""
Should only be called from functions locking the port list file, e.g.
_find_free_port and _release_ports. Returns a set containing the ports
currently used by all the migrations running on this conversion host.
"""
try:
cmd = ['sudo', 'bash', '-c',
'"test -e {0} || echo [] > {0}"'.format(PORT_MAP_FILE)]
result = self.shell.cmd_out(cmd)
if result:
self.log.debug('Port write result: %s', result)
except subprocess.CalledProcessError as err:
raise RuntimeError('Unable to initialize port map file!') from err
try: # Try to read in the set of used ports
cmd = ['sudo', 'cat', PORT_MAP_FILE]
result = self.shell.cmd_out(cmd)
used_ports = set(json.loads(result))
except ValueError:
self.log.info('Unable to read port map from %s, re-initializing '
'it...', PORT_MAP_FILE)
used_ports = set()
except subprocess.CalledProcessError as err:
            self.log.debug('Unable to get port map! %s', str(err))
            used_ports = set()  # fall back to an empty set so the log/return below cannot hit a NameError
self.log.info('Currently used ports: %s', str(list(used_ports)))
return used_ports
def __write_used_ports(self, used_ports):
"""
Should only be called from functions locking the port list file, e.g.
_find_free_port and _release_ports. Writes out the given port list to
the port list file on the current conversion host.
"""
try: # Write out port map to destination conversion host
cmd = ['-T', 'sudo', 'bash', '-c', '"cat > ' + PORT_MAP_FILE + '"']
input_json = json.dumps(list(used_ports))
sub = self.shell.cmd_sub(cmd, stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
out, err = sub.communicate(input_json)
if out:
self.log.debug('Wrote port file, stdout: %s', out)
if err:
self.log.debug('Wrote port file, stderr: %s', err)
except subprocess.CalledProcessError as err:
self.log.debug('Unable to write port map to conversion host! '
'Error was: %s', str(err))
@use_lock(PORT_LOCK_FILE)
def _find_free_port(self):
"""
Reserve ports on the current conversion host. Lock a file containing
the used ports, select some ports from the range that is unused, and
check that the port is available on the conversion host. Add this to
the locked file and unlock it for the next conversion.
"""
used_ports = self.__read_used_ports()
# Choose ports from the available possibilities, and try to bind
ephemeral_ports = set(range(49152, 65535))
available_ports = ephemeral_ports - used_ports
try:
port = available_ports.pop()
while not self._test_port_available(port):
self.log.info('Port %d not available, trying another.', port)
used_ports.add(port) # Mark used to avoid trying again
port = available_ports.pop()
except KeyError as err:
raise RuntimeError('No free ports on conversion host!') from err
used_ports.add(port)
self.__write_used_ports(used_ports)
self.log.info('Allocated port %d, all used: %s', port, used_ports)
self.claimed_ports.append(port)
return port
@use_lock(PORT_LOCK_FILE)
def _release_ports(self):
used_ports = self.__read_used_ports()
for port in self.claimed_ports:
try:
used_ports.remove(port)
except KeyError:
self.log.debug('Port already released? %d', port)
self.log.info('Cleaning used ports: %s', used_ports)
self.__write_used_ports(used_ports)
def _test_port_available(self, port):
"""
See if a port is open on the source conversion host by trying to listen
on it.
"""
result = self.shell.cmd_val(['timeout', '1', 'nc', '-l', str(port)])
# The 'timeout' command returns 124 when the command times out, meaning
# nc was successful and the port is free.
return result == 124
class RemoteShell():
def __init__(self, address, ssh_user, key_path=None):
self.address = address
self.ssh_user = ssh_user
self.key_path = key_path
def _default_options(self):
options = [
'-o', 'BatchMode=yes',
'-o', 'StrictHostKeyChecking=no',
'-o', 'ConnectTimeout=10',
]
if self.key_path:
options.extend(['-i', self.key_path])
return options
def ssh_preamble(self):
""" Common options to SSH into a conversion host. """
preamble = ['ssh']
preamble.extend(self._default_options())
preamble.extend([self.ssh_user + '@' + self.address])
return preamble
def cmd_out(self, command, **kwargs):
""" Run command on the target conversion host and get the output. """
args = self.ssh_preamble()
args.extend(command)
return subprocess.check_output(args, **kwargs).decode('utf-8').strip()
def cmd_val(self, command, **kwargs):
""" Run command on the target conversion host and get return code. """
args = self.ssh_preamble()
args.extend(command)
return subprocess.call(args, **kwargs)
def cmd_sub(self, command, **kwargs):
""" Start a long-running command on the target conversion host. """
args = self.ssh_preamble()
args.extend(command)
return subprocess.Popen(args, **kwargs)
def scp_to(self, source, destination):
""" Copy a file to the target conversion host. """
command = ['scp']
command.extend(self._default_options())
remote_path = self.ssh_user + '@' + self.address + ':' + destination
command.extend([source, remote_path])
return subprocess.call(command)
def scp_from(self, source, destination, recursive=False):
""" Copy a file from the source conversion host. """
command = ['scp']
command.extend(self._default_options())
if recursive:
command.extend(['-r'])
remote_path = self.ssh_user + '@' + self.address + ':' + source
command.extend([remote_path, destination])
return subprocess.call(command)
def test_ssh_connection(self):
""" Quick SSH connectivity check. """
out = self.cmd_out(['echo connected'])
if out != 'connected':
raise RuntimeError(self.address + ': SSH test unsuccessful!')
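# Illustrative RemoteShell usage; the host, user and key path are hypothetical values:
#
#     shell = RemoteShell('192.0.2.10', 'cloud-user', key_path='/home/stack/.ssh/migration')
#     shell.test_ssh_connection()
#     kernel = shell.cmd_out(['uname', '-r'])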
| from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import logging
import os
import subprocess
import time
# Default timeout for OpenStack operations
DEFAULT_TIMEOUT = 600
# Lock to serialize volume attachments. This helps prevent device path
# mismatches between the OpenStack SDK and /dev in the VM.
ATTACH_LOCK_FILE_SOURCE = '/var/lock/v2v-source-volume-lock'
ATTACH_LOCK_FILE_DESTINATION = '/var/lock/v2v-destination-volume-lock'
# File containing ports used by all the nbdkit processes running on the source
# conversion host. There is a check to see if the port is available, but this
# should speed things up.
PORT_MAP_FILE = '/var/run/v2v-migration-ports'
PORT_LOCK_FILE = '/var/lock/v2v-migration-lock' # Lock for the port map
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'r+')
def use_lock(lock_file):
""" Boilerplate for functions that need to take a lock. """
def _decorate_lock(function):
def wait_for_lock(self):
for second in range(DEFAULT_TIMEOUT):
try:
self.log.info('Waiting for lock %s...', lock_file)
lock = lock_file + '.lock'
cmd = ['sudo', 'flock', '--timeout', '1',
'--conflict-exit-code', '16', lock_file, '-c',
'"( test ! -e ' + lock + ' || exit 17 ) ' +
'&& touch ' + lock + '"']
result = self.shell.cmd_val(cmd)
if result == 16:
self.log.info('Another conversion has the lock.')
elif result == 17:
self.log.info('Another conversion is holding the lock.')
elif result == 0:
break
except subprocess.CalledProcessError as err:
self.log.info('Error waiting for lock: %s', str(err))
time.sleep(1)
else:
raise RuntimeError('Unable to acquire lock ' + lock_file)
try:
return function(self)
finally:
try:
lock = lock_file + '.lock'
result = self.shell.cmd_out(['sudo', 'rm', '-f', lock])
self.log.debug('Released lock: %s', result)
except subprocess.CalledProcessError as err:
self.log.error('Error releasing lock: %s', str(err))
return wait_for_lock
return _decorate_lock
class OpenStackHostBase():
def __init__(self, openstack_connection, conversion_host_id, ssh_key_path,
ssh_user, transfer_uuid, conversion_host_address=None,
state_file=None, log_file=None):
# Required common parameters:
# openstack_connection: OpenStack connection object
# conversion_host_id: ID of conversion host instance
# ssh_key_path: Path to SSH key authorized on conversion host
# ssh_user: Username to create the SSH connection
# transfer_uuid: UUID to mark processes on conversion hosts
self.conn = openstack_connection
self.conversion_host_id = conversion_host_id
self.ssh_key_path = ssh_key_path
self.ssh_user = ssh_user
self.transfer_uuid = transfer_uuid
# Optional parameters:
# conversion_host_address: Optional address used to override 'accessIPv4'
# state_file: File to hold current disk transfer state
# log_file: Debug log path for workload migration
self.conversion_host_address = conversion_host_address
self.state_file = state_file
self.log_file = log_file
# Configure logging
self.log = logging.getLogger('osp-osp')
log_format = logging.Formatter('%(asctime)s:%(levelname)s: ' +
'%(message)s (%(module)s:%(lineno)d)')
if log_file:
log_handler = logging.FileHandler(log_file)
else:
log_handler = logging.NullHandler()
log_handler.setFormatter(log_format)
self.log.addHandler(log_handler)
self.log.setLevel(logging.DEBUG)
if self._converter() is None:
raise RuntimeError('Cannot find instance {0}'.format(
self.conversion_host_id))
self.shell = RemoteShell(self._converter_address(), ssh_user, ssh_key_path)
self.shell.test_ssh_connection()
# Ports chosen for NBD export
self.claimed_ports = []
def _converter(self):
""" Refresh server object to pick up any changes. """
return self.conn.get_server_by_id(self.conversion_host_id)
def _converter_address(self):
""" Get IP address of conversion host. """
if self.conversion_host_address:
return self.conversion_host_address
else:
return self._converter().accessIPv4
def _update_progress(self, dev_path, progress):
self.log.info('Transfer progress for %s: %s%%', dev_path, str(progress))
if self.state_file is None:
return
self.volume_map[dev_path]['progress'] = progress
with open(self.state_file, 'w') as state:
all_progress = {}
for path, mapping in self.volume_map.items():
all_progress[path] = mapping['progress']
json.dump(all_progress, state)
def _attach_volumes(self, conn, name, funcs):
"""
Attach all volumes in the volume map to the specified conversion host.
Check the list of disks before and after attaching to be absolutely
sure the right source data gets copied to the right destination disk.
This is here because _attach_destination_volumes and
_attach_volumes_to_converter looked almost identical.
"""
self.log.info('Attaching volumes to %s wrapper', name)
host_func, ssh_func, update_func, volume_id_func = funcs
for path, mapping in sorted(self.volume_map.items()):
volume_id = volume_id_func(mapping)
volume = conn.get_volume_by_id(volume_id)
self.log.info('Attaching %s to %s conversion host', volume_id, name)
disks_before = ssh_func(['lsblk', '--noheadings', '--list',
'--paths', '--nodeps', '--output NAME'])
disks_before = set(disks_before.split())
self.log.debug('Initial disk list: %s', disks_before)
conn.attach_volume(volume=volume, wait=True, server=host_func(),
timeout=DEFAULT_TIMEOUT)
self.log.info('Waiting for volume to appear in %s wrapper', name)
self._wait_for_volume_dev_path(conn, volume, host_func(),
DEFAULT_TIMEOUT)
disks_after = ssh_func(['lsblk', '--noheadings', '--list',
'--paths', '--nodeps', '--output NAME'])
disks_after = set(disks_after.split())
self.log.debug('Updated disk list: %s', disks_after)
new_disks = disks_after - disks_before
volume = conn.get_volume_by_id(volume_id)
attachment = self._get_attachment(volume, host_func())
dev_path = attachment.device
if len(new_disks) == 1:
if dev_path in new_disks:
self.log.debug('Successfully attached new disk %s, and %s '
'conversion host path matches OpenStack.',
dev_path, name)
else:
dev_path = new_disks.pop()
self.log.debug('Successfully attached new disk %s, but %s '
'conversion host path does not match the '
'result from OpenStack. Using internal '
'device path %s.', attachment.device,
name, dev_path)
else:
raise RuntimeError('Got unexpected disk list after attaching '
'volume to {0} conversion host instance. '
'Failing migration procedure to avoid '
'assigning volumes incorrectly. New '
                                   'disk(s) inside VM: {1}, disk provided by '
'OpenStack: {2}'.format(name, new_disks,
dev_path))
self.volume_map[path] = update_func(mapping, dev_path)
def _get_attachment(self, volume, vm):
"""
Get the attachment object from the volume with the matching server ID.
Convenience method for use only when the attachment is already certain.
"""
for attachment in volume.attachments:
if attachment.server_id == vm.id:
return attachment
raise RuntimeError('Volume is not attached to the specified instance!')
def _wait_for_volume_dev_path(self, conn, volume, vm, timeout):
volume_id = volume.id
for second in range(timeout):
volume = conn.get_volume_by_id(volume_id)
if volume.attachments:
attachment = self._get_attachment(volume, vm)
if attachment.device.startswith('/dev/'):
return
time.sleep(1)
raise RuntimeError('Timed out waiting for volume device path!')
def __read_used_ports(self):
"""
Should only be called from functions locking the port list file, e.g.
_find_free_port and _release_ports. Returns a set containing the ports
currently used by all the migrations running on this conversion host.
"""
try:
cmd = ['sudo', 'bash', '-c',
'"test -e {0} || echo [] > {0}"'.format(PORT_MAP_FILE)]
result = self.shell.cmd_out(cmd)
if result:
self.log.debug('Port write result: %s', result)
except subprocess.CalledProcessError as err:
raise RuntimeError('Unable to initialize port map file!') from err
try: # Try to read in the set of used ports
cmd = ['sudo', 'cat', PORT_MAP_FILE]
result = self.shell.cmd_out(cmd)
used_ports = set(json.loads(result))
except ValueError:
self.log.info('Unable to read port map from %s, re-initializing '
'it...', PORT_MAP_FILE)
used_ports = set()
except subprocess.CalledProcessError as err:
            self.log.debug('Unable to get port map! %s', str(err))
            used_ports = set()  # fall back to an empty set so the log/return below cannot hit a NameError
self.log.info('Currently used ports: %s', str(list(used_ports)))
return used_ports
def __write_used_ports(self, used_ports):
"""
Should only be called from functions locking the port list file, e.g.
_find_free_port and _release_ports. Writes out the given port list to
the port list file on the current conversion host.
"""
try: # Write out port map to destination conversion host
cmd = ['-T', 'sudo', 'bash', '-c', '"cat > ' + PORT_MAP_FILE + '"']
input_json = json.dumps(list(used_ports))
sub = self.shell.cmd_sub(cmd, stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
out, err = sub.communicate(input_json)
if out:
self.log.debug('Wrote port file, stdout: %s', out)
if err:
self.log.debug('Wrote port file, stderr: %s', err)
except subprocess.CalledProcessError as err:
self.log.debug('Unable to write port map to conversion host! '
'Error was: %s', str(err))
@use_lock(PORT_LOCK_FILE)
def _find_free_port(self):
"""
Reserve ports on the current conversion host. Lock a file containing
the used ports, select some ports from the range that is unused, and
check that the port is available on the conversion host. Add this to
the locked file and unlock it for the next conversion.
"""
used_ports = self.__read_used_ports()
# Choose ports from the available possibilities, and try to bind
ephemeral_ports = set(range(49152, 65535))
available_ports = ephemeral_ports - used_ports
try:
port = available_ports.pop()
while not self._test_port_available(port):
self.log.info('Port %d not available, trying another.', port)
used_ports.add(port) # Mark used to avoid trying again
port = available_ports.pop()
except KeyError as err:
raise RuntimeError('No free ports on conversion host!') from err
used_ports.add(port)
self.__write_used_ports(used_ports)
self.log.info('Allocated port %d, all used: %s', port, used_ports)
self.claimed_ports.append(port)
return port
@use_lock(PORT_LOCK_FILE)
def _release_ports(self):
used_ports = self.__read_used_ports()
for port in self.claimed_ports:
try:
used_ports.remove(port)
except KeyError:
self.log.debug('Port already released? %d', port)
self.log.info('Cleaning used ports: %s', used_ports)
self.__write_used_ports(used_ports)
def _test_port_available(self, port):
"""
See if a port is open on the source conversion host by trying to listen
on it.
"""
result = self.shell.cmd_val(['timeout', '1', 'nc', '-l', str(port)])
# The 'timeout' command returns 124 when the command times out, meaning
# nc was successful and the port is free.
return result == 124
class RemoteShell():
def __init__(self, address, ssh_user, key_path=None):
self.address = address
self.ssh_user = ssh_user
self.key_path = key_path
def _default_options(self):
options = [
'-o', 'BatchMode=yes',
'-o', 'StrictHostKeyChecking=no',
'-o', 'ConnectTimeout=10',
]
if self.key_path:
options.extend(['-i', self.key_path])
return options
def ssh_preamble(self):
""" Common options to SSH into a conversion host. """
preamble = ['ssh']
preamble.extend(self._default_options())
preamble.extend([self.ssh_user + '@' + self.address])
return preamble
def cmd_out(self, command, **kwargs):
""" Run command on the target conversion host and get the output. """
args = self.ssh_preamble()
args.extend(command)
return subprocess.check_output(args, **kwargs).decode('utf-8').strip()
def cmd_val(self, command, **kwargs):
""" Run command on the target conversion host and get return code. """
args = self.ssh_preamble()
args.extend(command)
return subprocess.call(args, **kwargs)
def cmd_sub(self, command, **kwargs):
""" Start a long-running command on the target conversion host. """
args = self.ssh_preamble()
args.extend(command)
return subprocess.Popen(args, **kwargs)
def scp_to(self, source, destination):
""" Copy a file to the target conversion host. """
command = ['scp']
command.extend(self._default_options())
remote_path = self.ssh_user + '@' + self.address + ':' + destination
command.extend([source, remote_path])
return subprocess.call(command)
def scp_from(self, source, destination, recursive=False):
""" Copy a file from the source conversion host. """
command = ['scp']
command.extend(self._default_options())
if recursive:
command.extend(['-r'])
remote_path = self.ssh_user + '@' + self.address + ':' + source
command.extend([remote_path, destination])
return subprocess.call(command)
def test_ssh_connection(self):
""" Quick SSH connectivity check. """
out = self.cmd_out(['echo connected'])
if out != 'connected':
raise RuntimeError(self.address + ': SSH test unsuccessful!') | en | 0.851649 | # Default timeout for OpenStack operations # Lock to serialize volume attachments. This helps prevent device path # mismatches between the OpenStack SDK and /dev in the VM. # File containing ports used by all the nbdkit processes running on the source # conversion host. There is a check to see if the port is available, but this # should speed things up. # Lock for the port map Boilerplate for functions that need to take a lock. # Required common parameters: # openstack_connection: OpenStack connection object # conversion_host_id: ID of conversion host instance # ssh_key_path: Path to SSH key authorized on conversion host # ssh_user: Username to create the SSH connection # transfer_uuid: UUID to mark processes on conversion hosts # Optional parameters: # conversion_host_address: Optional address used to override 'accessIPv4' # state_file: File to hold current disk transfer state # log_file: Debug log path for workload migration # Configure logging # Ports chosen for NBD export Refresh server object to pick up any changes. Get IP address of conversion host. Attach all volumes in the volume map to the specified conversion host. Check the list of disks before and after attaching to be absolutely sure the right source data gets copied to the right destination disk. This is here because _attach_destination_volumes and _attach_volumes_to_converter looked almost identical. Get the attachment object from the volume with the matching server ID. Convenience method for use only when the attachment is already certain. Should only be called from functions locking the port list file, e.g. _find_free_port and _release_ports. Returns a set containing the ports currently used by all the migrations running on this conversion host. # Try to read in the set of used ports Should only be called from functions locking the port list file, e.g. _find_free_port and _release_ports. Writes out the given port list to the port list file on the current conversion host. # Write out port map to destination conversion host Reserve ports on the current conversion host. Lock a file containing the used ports, select some ports from the range that is unused, and check that the port is available on the conversion host. Add this to the locked file and unlock it for the next conversion. # Choose ports from the available possibilities, and try to bind # Mark used to avoid trying again See if a port is open on the source conversion host by trying to listen on it. # The 'timeout' command returns 124 when the command times out, meaning # nc was successful and the port is free. Common options to SSH into a conversion host. Run command on the target conversion host and get the output. Run command on the target conversion host and get return code. Start a long-running command on the target conversion host. Copy a file to the target conversion host. Copy a file from the source conversion host. Quick SSH connectivity check. | 2.169196 | 2 |
python/CRUD-examples/delete/foreign_keys.py | alanfeng99/oracle-db-examples | 1,071 | 6632110 | # Code Sample from the tutorial at https://learncodeshare.net/2015/07/09/delete-crud-using-cx_oracle/
# section titled "Deleting records referenced by Foreign Keys" 1st example
# Using the base template, the example code executes a simple delete using named bind variables.
# When following the tutorial with default data this section intentionally throws an error
# to demonstrate foreign key functionality.
import cx_Oracle
import os
connectString = os.getenv('DB_CONNECT') # The environment variable for the connect string: DB_CONNECT=user/password@database
con = cx_Oracle.connect(connectString)
def get_all_rows(label, data_type='people'):
# Query all rows
cur = con.cursor()
if (data_type == 'pets'):
statement = 'select id, name, owner, type from lcs_pets order by owner, id'
else:
statement = 'select id, name, age, notes from lcs_people order by id'
cur.execute(statement)
res = cur.fetchall()
print(label + ': ')
print (res)
print(' ')
cur.close()
get_all_rows('Original People Data', 'people')
get_all_rows('Original Pet Data', 'pets')
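# With the default tutorial data the delete below is blocked by the foreign key on
# lcs_pets, so cx_Oracle raises a database error (typically IntegrityError /
# ORA-02292: child record found) - the behaviour this example demonstrates.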
cur = con.cursor()
statement = 'delete from lcs_people where id = :id'
cur.execute(statement, {'id':1})
con.commit()
get_all_rows('New People Data', 'people')
get_all_rows('New Pet Data', 'pets')
| # Code Sample from the tutorial at https://learncodeshare.net/2015/07/09/delete-crud-using-cx_oracle/
# section titled "Deleting records referenced by Foreign Keys" 1st example
# Using the base template, the example code executes a simple delete using named bind variables.
# When following the tutorial with default data this section intentionally throws an error
# to demonstrate foreign key functionality.
import cx_Oracle
import os
connectString = os.getenv('DB_CONNECT') # The environment variable for the connect string: DB_CONNECT=user/password@database
con = cx_Oracle.connect(connectString)
def get_all_rows(label, data_type='people'):
# Query all rows
cur = con.cursor()
if (data_type == 'pets'):
statement = 'select id, name, owner, type from lcs_pets order by owner, id'
else:
statement = 'select id, name, age, notes from lcs_people order by id'
cur.execute(statement)
res = cur.fetchall()
print(label + ': ')
print (res)
print(' ')
cur.close()
get_all_rows('Original People Data', 'people')
get_all_rows('Original Pet Data', 'pets')
cur = con.cursor()
statement = 'delete from lcs_people where id = :id'
cur.execute(statement, {'id':1})
con.commit()
get_all_rows('New People Data', 'people')
get_all_rows('New Pet Data', 'pets')
| en | 0.768067 | # Code Sample from the tutorial at https://learncodeshare.net/2015/07/09/delete-crud-using-cx_oracle/ # section titled "Deleting records referenced by Foreign Keys" 1st example # Using the base template, the example code executes a simple delete using named bind variables. # When following the tutorial with default data this section intentionally throws an error # to demonstrate foreign key functionality. # The environment variable for the connect string: DB_CONNECT=user/password@database # Query all rows | 3.461673 | 3 |
GUI_projects/alarm clock.py | renataeva/python-basics | 1 | 6632111 | from tkinter import *
import random
window = Tk()
window.geometry('250x350')
window.title('Будильник')
window.iconbitmap('alarm.ico')
def deletef():
indexes = alarmbox.curselection()
for i in reversed(indexes):
alarmbox.delete(i)
status.config(text=f'Всего будильников: {alarmbox.size()}')
def open_change_window():
change = Toplevel()
change.title('Изменить')
change.geometry('300x125')
change.iconbitmap('alarm.ico')
label_change = Label(change, text='Введите новое значение будильника')
label_change.pack(padx=10, pady=10)
newalarm = Entry(change, justify=CENTER)
newalarm.pack(padx=10, pady=10)
a = alarmbox.curselection()[0]
al = alarmbox.get(a)
newalarm.insert(0, al)
def update_alarm():
na = newalarm.get()
print(a)
alarmbox.delete(a)
alarmbox.insert(a, na)
change.destroy()
saveb = Button(change, text='Сохранить', command=update_alarm)
saveb.pack(pady=5, padx=10)
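# generate_alarms fills the listbox with 10 alarms, one at a random minute inside
# each consecutive 6-minute window between 07:00 and 07:59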
def generate_alarms():
alarms = []
a = 0
b = 5
for _ in range(10):
m = random.randint(a, b)
if m >= 10:
alarms.append(f'07:{m}')
else:
alarms.append(f'07:0{m}')
a += 6
b += 6
alarmbox.insert(END, *alarms)
status.config(text=f'Всего будильников: {alarmbox.size()}')
label1 = Label(text='Будильники')
label1.pack(pady=5, padx=10)
alarmbox = Listbox(width=30, height=10, justify=CENTER, selectmode=EXTENDED)
status = Label(text=f'Всего будильников: 0')
status.pack(pady=5, padx=10)
alarmbox.pack()
randomb = Button(text='Случайные будильники', command=generate_alarms)
randomb.pack(pady=5, padx=10)
deleteb = Button(text='Удалить будильник', command=deletef)
deleteb.pack(pady=5, padx=10)
config = Button(text='Изменить будильники', command=open_change_window)
config.pack(pady=5, padx=10)
window.mainloop()
| from tkinter import *
import random
window = Tk()
window.geometry('250x350')
window.title('Будильник')
window.iconbitmap('alarm.ico')
def deletef():
indexes = alarmbox.curselection()
for i in reversed(indexes):
alarmbox.delete(i)
status.config(text=f'Всего будильников: {alarmbox.size()}')
def open_change_window():
change = Toplevel()
change.title('Изменить')
change.geometry('300x125')
change.iconbitmap('alarm.ico')
label_change = Label(change, text='Введите новое значение будильника')
label_change.pack(padx=10, pady=10)
newalarm = Entry(change, justify=CENTER)
newalarm.pack(padx=10, pady=10)
a = alarmbox.curselection()[0]
al = alarmbox.get(a)
newalarm.insert(0, al)
def update_alarm():
na = newalarm.get()
print(a)
alarmbox.delete(a)
alarmbox.insert(a, na)
change.destroy()
saveb = Button(change, text='Сохранить', command=update_alarm)
saveb.pack(pady=5, padx=10)
def generate_alarms():
alarms = []
a = 0
b = 5
for _ in range(10):
m = random.randint(a, b)
if m >= 10:
alarms.append(f'07:{m}')
else:
alarms.append(f'07:0{m}')
a += 6
b += 6
alarmbox.insert(END, *alarms)
status.config(text=f'Всего будильников: {alarmbox.size()}')
label1 = Label(text='Будильники')
label1.pack(pady=5, padx=10)
alarmbox = Listbox(width=30, height=10, justify=CENTER, selectmode=EXTENDED)
status = Label(text=f'Всего будильников: 0')
status.pack(pady=5, padx=10)
alarmbox.pack()
randomb = Button(text='Случайные будильники', command=generate_alarms)
randomb.pack(pady=5, padx=10)
deleteb = Button(text='Удалить будильник', command=deletef)
deleteb.pack(pady=5, padx=10)
config = Button(text='Изменить будильники', command=open_change_window)
config.pack(pady=5, padx=10)
window.mainloop()
| none | 1 | 3.374881 | 3 |
|
src/sensors/sensor_benchmark.py | adeo/benchmark-tipboard | 0 | 6632112 | import time
from src.sensors.matomo_utils import valueFromAction, getDevices
from src.sensors.utils import end, sendDataToTipboard
from src.tipboard.app.properties import COLOR_TAB
def updateNormChartTipBoard(bench, tile, isTest=False):
if not "values" in bench[0]:
return
datasetLength = len(bench)
data = dict()
data['title'] = dict(display=False)
data['datasets'] = list()
for index in range(datasetLength):
data['datasets'].append(
dict(label=bench[index]["device"],
data=bench[index]["values"],
borderColor=COLOR_TAB[index]))
tipboardAnswer = sendDataToTipboard(data=data, tile_template='norm_chart', tile_id=tile, isTest=isTest)
end(title=f'{tile} -> {tile}', start_time=time.time(), tipboardAnswer=tipboardAnswer, TILE_ID=tile)
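# Each benchmarked device becomes one line series in the norm_chart tile, with its
# colour taken from COLOR_TAB in the order the devices are returned.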
def updateListingTipBoard(list, tile, isTest=False):
data = {'items': list}
tipboardAnswer = sendDataToTipboard(data=data, tile_template='norm_chart', tile_id=tile, isTest=isTest)
end(title=f'{tile} -> {tile}', start_time=time.time(), tipboardAnswer=tipboardAnswer, TILE_ID=tile)
def updateCPUTipBoard(isTest=False):
cpu_bench = valueFromAction("ackleyBenchmark")
print(f'CPU = {cpu_bench}')
updateNormChartTipBoard(cpu_bench, 'cpu', isTest)
def updateGPUTipBoard(isTest=False):
gpu_bench = valueFromAction("scroll")
print(f'GPU = {gpu_bench}')
updateNormChartTipBoard(gpu_bench, 'gpu', isTest)
def updateNetworkTipBoard(isTest=False):
network_bench = valueFromAction("dowloadFile")
updateNormChartTipBoard(network_bench, 'network', isTest)
def updateDevicesTipBoard(isTest=False):
list = getDevices()
updateListingTipBoard(list, 'list_devices', isTest)
def sonde_bench(isTest=False):
updateCPUTipBoard(isTest)
updateGPUTipBoard(isTest)
updateNetworkTipBoard(isTest)
updateDevicesTipBoard(isTest)
cpu_bench = valueFromAction("ackleyBenchmark")
print(f'CPU = {cpu_bench}')
gpu_bench = valueFromAction("scroll")
print(f'GPU = {gpu_bench}')
| import time
from src.sensors.matomo_utils import valueFromAction, getDevices
from src.sensors.utils import end, sendDataToTipboard
from src.tipboard.app.properties import COLOR_TAB
def updateNormChartTipBoard(bench, tile, isTest=False):
if not "values" in bench[0]:
return
datasetLength = len(bench)
data = dict()
data['title'] = dict(display=False)
data['datasets'] = list()
for index in range(datasetLength):
data['datasets'].append(
dict(label=bench[index]["device"],
data=bench[index]["values"],
borderColor=COLOR_TAB[index]))
tipboardAnswer = sendDataToTipboard(data=data, tile_template='norm_chart', tile_id=tile, isTest=isTest)
end(title=f'{tile} -> {tile}', start_time=time.time(), tipboardAnswer=tipboardAnswer, TILE_ID=tile)
def updateListingTipBoard(list, tile, isTest=False):
data = {'items': list}
tipboardAnswer = sendDataToTipboard(data=data, tile_template='norm_chart', tile_id=tile, isTest=isTest)
end(title=f'{tile} -> {tile}', start_time=time.time(), tipboardAnswer=tipboardAnswer, TILE_ID=tile)
def updateCPUTipBoard(isTest=False):
cpu_bench = valueFromAction("ackleyBenchmark")
print(f'CPU = {cpu_bench}')
updateNormChartTipBoard(cpu_bench, 'cpu', isTest)
def updateGPUTipBoard(isTest=False):
gpu_bench = valueFromAction("scroll")
print(f'GPU = {gpu_bench}')
updateNormChartTipBoard(gpu_bench, 'gpu', isTest)
def updateNetworkTipBoard(isTest=False):
network_bench = valueFromAction("dowloadFile")
updateNormChartTipBoard(network_bench, 'network', isTest)
def updateDevicesTipBoard(isTest=False):
list = getDevices()
updateListingTipBoard(list, 'list_devices', isTest)
def sonde_bench(isTest=False):
updateCPUTipBoard(isTest)
updateGPUTipBoard(isTest)
updateNetworkTipBoard(isTest)
updateDevicesTipBoard(isTest)
cpu_bench = valueFromAction("ackleyBenchmark")
print(f'CPU = {cpu_bench}')
gpu_bench = valueFromAction("scroll")
print(f'GPU = {gpu_bench}')
| none | 1 | 2.296878 | 2 |
|
python/paddle/fluid/tests/unittests/test_parallel_executor_fix_op_run_order.py | xingjing1/Paddle | 0 | 6632113 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import unittest
import numpy as np
from paddle.vision.models import resnet50
from paddle.nn import CrossEntropyLoss
class TestFixOpRunOrder(unittest.TestCase):
def setUp(self):
paddle.enable_static()
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
if paddle.is_compiled_with_cuda():
fluid.set_flags({'FLAGS_cudnn_deterministic': 1})
def get_place(self):
return paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
) else paddle.CPUPlace()
def get_feed(self):
batch_size = 32
image = np.random.random([batch_size, 3, 224, 224]).astype('float32')
label = np.random.randint(0, 1000, [batch_size, 1]).astype('int64')
return {"image": image, "label": label}
def create_model(self, fix_op_run_order):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
scope = paddle.static.Scope()
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(
name="image", shape=[None, 3, 224, 224], dtype="float32")
label = paddle.static.data(
name="label", shape=[None, 1], dtype="int64")
model = resnet50()
pred = model(image)
loss_fn = CrossEntropyLoss()
loss = loss_fn(pred, label)
optimizer = paddle.optimizer.SGD(learning_rate=1e-3)
optimizer.minimize(loss)
build_strategy = paddle.static.BuildStrategy()
build_strategy.fix_op_run_order = fix_op_run_order
build_strategy.fuse_bn_act_ops = True
build_strategy.fuse_bn_add_act_ops = True
main_prog = paddle.static.CompiledProgram(main_prog).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
places=[self.get_place()])
exe = paddle.static.Executor(self.get_place())
with paddle.static.scope_guard(scope):
exe.run(startup_prog)
return main_prog, scope, loss
def run_and_fetch_loss(self, main_prog, scope, loss, feed):
with paddle.static.scope_guard(scope):
exe = paddle.static.Executor(self.get_place())
loss_value = exe.run(main_prog, feed=feed, fetch_list=[loss])[0]
return loss_value
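    # setUp fixes the random seeds and enables FLAGS_cudnn_deterministic, so the two
    # programs (fix_op_run_order True/False) are expected to yield identical losses.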
def test_main(self):
if not paddle.is_compiled_with_cuda():
return
main1, scope1, loss1 = self.create_model(True)
main2, scope2, loss2 = self.create_model(False)
for i in range(10):
feed = self.get_feed()
loss_val1 = self.run_and_fetch_loss(main1, scope1, loss1, feed)
loss_val2 = self.run_and_fetch_loss(main2, scope2, loss2, feed)
self.assertEqual(loss_val1, loss_val2)
if __name__ == "__main__":
unittest.main()
| # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import unittest
import numpy as np
from paddle.vision.models import resnet50
from paddle.nn import CrossEntropyLoss
class TestFixOpRunOrder(unittest.TestCase):
def setUp(self):
paddle.enable_static()
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
if paddle.is_compiled_with_cuda():
fluid.set_flags({'FLAGS_cudnn_deterministic': 1})
def get_place(self):
return paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
) else paddle.CPUPlace()
def get_feed(self):
batch_size = 32
image = np.random.random([batch_size, 3, 224, 224]).astype('float32')
label = np.random.randint(0, 1000, [batch_size, 1]).astype('int64')
return {"image": image, "label": label}
def create_model(self, fix_op_run_order):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
scope = paddle.static.Scope()
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(
name="image", shape=[None, 3, 224, 224], dtype="float32")
label = paddle.static.data(
name="label", shape=[None, 1], dtype="int64")
model = resnet50()
pred = model(image)
loss_fn = CrossEntropyLoss()
loss = loss_fn(pred, label)
optimizer = paddle.optimizer.SGD(learning_rate=1e-3)
optimizer.minimize(loss)
build_strategy = paddle.static.BuildStrategy()
build_strategy.fix_op_run_order = fix_op_run_order
build_strategy.fuse_bn_act_ops = True
build_strategy.fuse_bn_add_act_ops = True
main_prog = paddle.static.CompiledProgram(main_prog).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
places=[self.get_place()])
exe = paddle.static.Executor(self.get_place())
with paddle.static.scope_guard(scope):
exe.run(startup_prog)
return main_prog, scope, loss
def run_and_fetch_loss(self, main_prog, scope, loss, feed):
with paddle.static.scope_guard(scope):
exe = paddle.static.Executor(self.get_place())
loss_value = exe.run(main_prog, feed=feed, fetch_list=[loss])[0]
return loss_value
def test_main(self):
if not paddle.is_compiled_with_cuda():
return
main1, scope1, loss1 = self.create_model(True)
main2, scope2, loss2 = self.create_model(False)
for i in range(10):
feed = self.get_feed()
loss_val1 = self.run_and_fetch_loss(main1, scope1, loss1, feed)
loss_val2 = self.run_and_fetch_loss(main2, scope2, loss2, feed)
self.assertEqual(loss_val1, loss_val2)
if __name__ == "__main__":
unittest.main()
| en | 0.856067 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.998102 | 2 |
migrations/versions/672fc4e15fc5_.py | mytopdog/fndash | 0 | 6632114 | """empty message
Revision ID: 672fc4e15fc5
Revises: <PASSWORD>
Create Date: 2019-03-09 06:41:05.722187
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '672fc4e15fc5'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('game', sa.Column('playlist', sa.String(), nullable=True))
op.alter_column('game', 'game_type', type_=sa.String(), new_column_name='mode')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('game', 'mode', type_=sa.VARCHAR(length=6), new_column_name='game_type')
op.drop_column('game', 'playlist')
# ### end Alembic commands ###
| """empty message
Revision ID: 672fc4e15fc5
Revises: <PASSWORD>
Create Date: 2019-03-09 06:41:05.722187
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '672fc4e15fc5'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('game', sa.Column('playlist', sa.String(), nullable=True))
op.alter_column('game', 'game_type', type_=sa.String(), new_column_name='mode')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('game', 'mode', type_=sa.VARCHAR(length=6), new_column_name='game_type')
op.drop_column('game', 'playlist')
# ### end Alembic commands ###
| en | 0.427171 | empty message Revision ID: 672fc4e15fc5 Revises: <PASSWORD> Create Date: 2019-03-09 06:41:05.722187 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.414971 | 1 |
clld/web/datatables/unitvalue.py | Woseseltops/clld | 1 | 6632115 | <filename>clld/web/datatables/unitvalue.py
from sqlalchemy.orm import joinedload
from clld.db.models import common
from clld.web.datatables.base import DataTable, LinkCol
class UnitValueNameCol(LinkCol):
def order(self):
return common.UnitDomainElement.id \
if self.dt.unitparameter and self.dt.unitparameter.domain \
else [common.UnitValue.name, common.UnitValue.id]
def search(self, qs):
if self.dt.unitparameter and self.dt.unitparameter.domain:
return common.UnitDomainElement.name.contains(qs)
return common.UnitValue.name.contains(qs)
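# Ordering and searching go through UnitDomainElement when the unit parameter has a
# closed domain; otherwise the plain UnitValue name/id columns are used.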
class Unitvalues(DataTable):
__constraints__ = [common.UnitParameter, common.Contribution, common.Unit]
def base_query(self, query):
query = query\
.join(common.Unit)\
.outerjoin(common.UnitDomainElement)\
.options(joinedload(common.UnitValue.unit))
if self.unit:
#query = query.join(common.UnitParameter, common.Contribution)
return query.filter(common.UnitValue.unit_pk == self.unit.pk)
if self.unitparameter:
#query = query.join(common.Contribution, common.Unit)
return query.filter(
common.UnitValue.unitparameter_pk == self.unitparameter.pk)
if self.contribution:
#query = query.join(common.Unit, common.UnitParameter)
return query.filter(common.UnitValue.contribution_pk == self.contribution.pk)
return query
def col_defs(self):
name_col = UnitValueNameCol(self, 'value')
if self.unitparameter and self.unitparameter.domain:
name_col.choices = sorted([de.name for de in self.unitparameter.domain])
return [
name_col,
LinkCol(self, 'unit', get_obj=lambda i: i.unit, model_col=common.Unit.name),
]
def toolbar(self):
return ''
| <filename>clld/web/datatables/unitvalue.py
from sqlalchemy.orm import joinedload
from clld.db.models import common
from clld.web.datatables.base import DataTable, LinkCol
class UnitValueNameCol(LinkCol):
def order(self):
return common.UnitDomainElement.id \
if self.dt.unitparameter and self.dt.unitparameter.domain \
else [common.UnitValue.name, common.UnitValue.id]
def search(self, qs):
if self.dt.unitparameter and self.dt.unitparameter.domain:
return common.UnitDomainElement.name.contains(qs)
return common.UnitValue.name.contains(qs)
class Unitvalues(DataTable):
__constraints__ = [common.UnitParameter, common.Contribution, common.Unit]
def base_query(self, query):
query = query\
.join(common.Unit)\
.outerjoin(common.UnitDomainElement)\
.options(joinedload(common.UnitValue.unit))
if self.unit:
#query = query.join(common.UnitParameter, common.Contribution)
return query.filter(common.UnitValue.unit_pk == self.unit.pk)
if self.unitparameter:
#query = query.join(common.Contribution, common.Unit)
return query.filter(
common.UnitValue.unitparameter_pk == self.unitparameter.pk)
if self.contribution:
#query = query.join(common.Unit, common.UnitParameter)
return query.filter(common.UnitValue.contribution_pk == self.contribution.pk)
return query
def col_defs(self):
name_col = UnitValueNameCol(self, 'value')
if self.unitparameter and self.unitparameter.domain:
name_col.choices = sorted([de.name for de in self.unitparameter.domain])
return [
name_col,
LinkCol(self, 'unit', get_obj=lambda i: i.unit, model_col=common.Unit.name),
]
def toolbar(self):
return ''
| zh | 0.077338 | #query = query.join(common.UnitParameter, common.Contribution) #query = query.join(common.Contribution, common.Unit) #query = query.join(common.Unit, common.UnitParameter) | 2.137566 | 2 |
scrape.py | Brok-Bucholtz/Grad-Job-Classification | 0 | 6632116 | <gh_stars>0
import csv
from os import path, makedirs
import ipgetter
import requests
from dateutil import parser
from pymongo import DESCENDING
from feature_extraction import degree_classification
def _update_array_fields(model, current_values, new_field_values):
"""
Update all array fields if they don't contain the new values
:param model: DB Base Model
:param current_values: Dictionary of current values for model
:param new_field_values: Dictionary of new values that should be in arrays
:return:
"""
update_array_fields = {}
for field, value in new_field_values.items():
if value not in current_values[field]:
update_array_fields[field] = value
if update_array_fields:
model.update_one({'_id': current_values['_id']}, {'$push': update_array_fields})
def _finish_processing(database, job):
"""
Finish processing scraped jobs
:param database: Database to update the job
:param job: Job to continue processing
:return:
"""
html_posting = requests.get(job['url']).content
database.jobs.update_one(
{'_id': job['_id']},
{'$set': {
'html_posting': html_posting,
'degree_classification': degree_classification(html_posting),
'finished_processing': True}})
def scrape_cities():
"""
Get list of cities in the United States with a population of at least 15,000
:return: Cities
"""
cities = []
cities_file_path = './submodule/world-cities/data/world-cities.csv'
cache_folder_path = './cache/'
cities_cache_filename = 'world-cities.csv'
if not path.exists(cache_folder_path):
makedirs(cache_folder_path)
if not path.exists(cache_folder_path + cities_cache_filename):
# Read raw city data
with open(cities_file_path) as file:
reader = csv.reader(file)
for row in reader:
if row[1] == 'United States':
cities.append(row[0] + ', ' + row[2])
# Cache formatted data
with open(cache_folder_path + cities_cache_filename, 'w+') as file:
writer = csv.writer(file)
for city in cities:
writer.writerow([city])
else:
# Read from cache
with open(cache_folder_path + cities_cache_filename) as file:
reader = csv.reader(file)
cities = [row[0] for row in reader]
return cities
def scrape_indeed(database, indeed_client, logger, job_title, locations):
"""
Scrape job data from indeed and save it to the database
:param database: Database to save the indeed data
:param indeed_client: Indeed API client
:param logger: Logger to log activity
:param job_title: Job title to search for
:param locations: Job locations to search for
:return:
"""
max_indeed_limit = 25
sample_max_city_name_length = 35
indeed_params = {
'q': job_title,
'limit': max_indeed_limit,
'latlong': 1,
'sort': 'date',
'userip': ipgetter.myip(),
'useragent': 'Python'
}
for location in locations:
# Using a dicts instead of a list will prevent from adding duplicates
new_jobs = {}
update_jobs = {}
result_start = 0
newest_job = database.jobs.find_one({'search_title': job_title, 'search_location': location},
sort=[('date', DESCENDING)])
indeed_response = indeed_client.search(**indeed_params, l=location, start=result_start)
if 'error' in indeed_response:
raise Exception('Indeed Error - {}'.format(indeed_response['error']))
jobs = indeed_response['results']
total_jobs = indeed_response['totalResults']
while result_start < total_jobs and\
(not newest_job or newest_job['date'] < parser.parse(jobs[0]['date']).timestamp()):
for job in jobs:
found_job = database.jobs.find_one({'jobkey': job['jobkey']})
if found_job:
update_jobs[found_job['jobkey']] = found_job
else:
job['search_location'] = [location]
job['search_title'] = [job_title]
job['date'] = parser.parse(job['date']).timestamp()
job['finished_processing'] = False
new_jobs[job['jobkey']] = job
result_start += indeed_params['limit']
jobs = indeed_client.search(**indeed_params, l=location, start=result_start)['results']
try:
if new_jobs:
debug_log_string = 'Scraped location {:<' + str(sample_max_city_name_length) + '} found {:>3} jobs.'
logger.debug(debug_log_string.format(location, len(new_jobs)))
database.jobs.insert_many(new_jobs.values())
for job_key, update_job in update_jobs.items():
_update_array_fields(
database.jobs,
update_job,
{'search_location': location, 'search_title': job_title})
except Exception as error:
logger.error('Updating db for search_location {} scrape data failed: {}'.format(location, error))
unprocessed_jobs = database.jobs.find({'finished_processing': False})
total_jobs = unprocessed_jobs.count()
for job_i, job in enumerate(unprocessed_jobs):
logger.debug('Processing job {:>3}/{:<3}'.format(job_i+1, total_jobs))
_finish_processing(database, job)
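# Illustrative call, assuming a MongoDB database handle, an Indeed API client and a
# logger are already configured (the argument names here are hypothetical):
#
#     scrape_indeed(database, indeed_client, logger, 'machine learning engineer',
#                   scrape_cities())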
| import csv
from os import path, makedirs
import ipgetter
import requests
from dateutil import parser
from pymongo import DESCENDING
from feature_extraction import degree_classification
def _update_array_fields(model, current_values, new_field_values):
"""
Update all array fields if they don't contain the new values
:param model: DB Base Model
:param current_values: Dictionary of current values for model
:param new_field_values: Dictionary of new values that should be in arrays
:return:
"""
update_array_fields = {}
for field, value in new_field_values.items():
if value not in current_values[field]:
update_array_fields[field] = value
if update_array_fields:
model.update_one({'_id': current_values['_id']}, {'$push': update_array_fields})
def _finish_processing(database, job):
"""
Finish processing scraped jobs
:param database: Database to update the job
:param job: Job to continue processing
:return:
"""
html_posting = requests.get(job['url']).content
database.jobs.update_one(
{'_id': job['_id']},
{'$set': {
'html_posting': html_posting,
'degree_classification': degree_classification(html_posting),
'finished_processing': True}})
def scrape_cities():
"""
Get list of cities in the United States with a population of at least 15,000
:return: Cities
"""
cities = []
cities_file_path = './submodule/world-cities/data/world-cities.csv'
cache_folder_path = './cache/'
cities_cache_filename = 'world-cities.csv'
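    # Build the cache on first run; afterwards read the formatted city list from it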
if not path.exists(cache_folder_path):
makedirs(cache_folder_path)
if not path.exists(cache_folder_path + cities_cache_filename):
# Read raw city data
with open(cities_file_path) as file:
reader = csv.reader(file)
for row in reader:
if row[1] == 'United States':
cities.append(row[0] + ', ' + row[2])
# Cache formatted data
with open(cache_folder_path + cities_cache_filename, 'w+') as file:
writer = csv.writer(file)
for city in cities:
writer.writerow([city])
else:
# Read from cache
with open(cache_folder_path + cities_cache_filename) as file:
reader = csv.reader(file)
cities = [row[0] for row in reader]
return cities
def scrape_indeed(database, indeed_client, logger, job_title, locations):
"""
Scrape job data from indeed and save it to the database
:param database: Database to save the indeed data
:param indeed_client: Indeed API client
:param logger: Logger to log activity
:param job_title: Job title to search for
:param locations: Job locations to search for
:return:
"""
max_indeed_limit = 25
sample_max_city_name_length = 35
indeed_params = {
'q': job_title,
'limit': max_indeed_limit,
'latlong': 1,
'sort': 'date',
'userip': ipgetter.myip(),
'useragent': 'Python'
}
for location in locations:
        # Using dicts keyed by jobkey instead of lists prevents adding duplicate postings
new_jobs = {}
update_jobs = {}
result_start = 0
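        # Newest stored job for this search; used to stop paging once older postings are reached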
newest_job = database.jobs.find_one({'search_title': job_title, 'search_location': location},
sort=[('date', DESCENDING)])
indeed_response = indeed_client.search(**indeed_params, l=location, start=result_start)
if 'error' in indeed_response:
raise Exception('Indeed Error - {}'.format(indeed_response['error']))
jobs = indeed_response['results']
total_jobs = indeed_response['totalResults']
while result_start < total_jobs and\
(not newest_job or newest_job['date'] < parser.parse(jobs[0]['date']).timestamp()):
for job in jobs:
found_job = database.jobs.find_one({'jobkey': job['jobkey']})
if found_job:
update_jobs[found_job['jobkey']] = found_job
else:
job['search_location'] = [location]
job['search_title'] = [job_title]
job['date'] = parser.parse(job['date']).timestamp()
job['finished_processing'] = False
new_jobs[job['jobkey']] = job
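            # Advance to the next page of Indeed results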
result_start += indeed_params['limit']
jobs = indeed_client.search(**indeed_params, l=location, start=result_start)['results']
try:
if new_jobs:
debug_log_string = 'Scraped location {:<' + str(sample_max_city_name_length) + '} found {:>3} jobs.'
logger.debug(debug_log_string.format(location, len(new_jobs)))
database.jobs.insert_many(new_jobs.values())
for job_key, update_job in update_jobs.items():
_update_array_fields(
database.jobs,
update_job,
{'search_location': location, 'search_title': job_title})
except Exception as error:
logger.error('Updating db for search_location {} scrape data failed: {}'.format(location, error))
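    # Second pass: download and classify any postings that were saved but not yet fully processed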
unprocessed_jobs = database.jobs.find({'finished_processing': False})
total_jobs = unprocessed_jobs.count()
for job_i, job in enumerate(unprocessed_jobs):
logger.debug('Processing job {:>3}/{:<3}'.format(job_i+1, total_jobs))
_finish_processing(database, job) | en | 0.712809 | Update all array fields if they don't contain the new values :param model: DB Base Model :param current_values: Dictionary of current values for model :param new_field_values: Dictionary of new values that should be in arrays :return: Finish processing scraped jobs :param database: Database to update the job :param job: Job to continue processing :return: Get list of cities in the United States with a population of at least 15,000 :return: Cities # Read raw city data # Cache formatted data # Read from cache Scrape job data from indeed and save it to the database :param database: Database to save the indeed data :param indeed_client: Indeed API client :param logger: Logger to log activity :param job_title: Job title to search for :param locations: Job locations to search for :return: # Using a dicts instead of a list will prevent from adding duplicates | 3.00033 | 3 |
google/cloud/datastore_v1/types/datastore.py | Vincent-Weng/python-datastore | 0 | 6632117 | <reponame>Vincent-Weng/python-datastore
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.datastore_v1.types import entity
from google.cloud.datastore_v1.types import query as gd_query
from google.protobuf import timestamp_pb2 # type: ignore
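# Manifest of every request/response and helper message exported for the google.datastore.v1 package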
__protobuf__ = proto.module(
package="google.datastore.v1",
manifest={
"LookupRequest",
"LookupResponse",
"RunQueryRequest",
"RunQueryResponse",
"BeginTransactionRequest",
"BeginTransactionResponse",
"RollbackRequest",
"RollbackResponse",
"CommitRequest",
"CommitResponse",
"AllocateIdsRequest",
"AllocateIdsResponse",
"ReserveIdsRequest",
"ReserveIdsResponse",
"Mutation",
"MutationResult",
"ReadOptions",
"TransactionOptions",
},
)
class LookupRequest(proto.Message):
r"""The request for
[Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
read_options (google.cloud.datastore_v1.types.ReadOptions):
The options for this lookup request.
keys (Sequence[google.cloud.datastore_v1.types.Key]):
Required. Keys of entities to look up.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
read_options = proto.Field(
proto.MESSAGE,
number=1,
message="ReadOptions",
)
keys = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=entity.Key,
)
class LookupResponse(proto.Message):
r"""The response for
[Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
Attributes:
found (Sequence[google.cloud.datastore_v1.types.EntityResult]):
Entities found as ``ResultType.FULL`` entities. The order of
results in this field is undefined and has no relation to
the order of the keys in the input.
missing (Sequence[google.cloud.datastore_v1.types.EntityResult]):
Entities not found as ``ResultType.KEY_ONLY`` entities. The
order of results in this field is undefined and has no
relation to the order of the keys in the input.
deferred (Sequence[google.cloud.datastore_v1.types.Key]):
A list of keys that were not looked up due to
resource constraints. The order of results in
this field is undefined and has no relation to
the order of the keys in the input.
read_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which these entities were read or
found missing.
"""
found = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gd_query.EntityResult,
)
missing = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=gd_query.EntityResult,
)
deferred = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=entity.Key,
)
read_time = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
class RunQueryRequest(proto.Message):
r"""The request for
[Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
partition_id (google.cloud.datastore_v1.types.PartitionId):
Entities are partitioned into subsets,
identified by a partition ID. Queries are scoped
to a single partition. This partition ID is
normalized with the standard default context
partition ID.
read_options (google.cloud.datastore_v1.types.ReadOptions):
The options for this query.
query (google.cloud.datastore_v1.types.Query):
The query to run.
This field is a member of `oneof`_ ``query_type``.
gql_query (google.cloud.datastore_v1.types.GqlQuery):
The GQL query to run.
This field is a member of `oneof`_ ``query_type``.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
partition_id = proto.Field(
proto.MESSAGE,
number=2,
message=entity.PartitionId,
)
read_options = proto.Field(
proto.MESSAGE,
number=1,
message="ReadOptions",
)
query = proto.Field(
proto.MESSAGE,
number=3,
oneof="query_type",
message=gd_query.Query,
)
gql_query = proto.Field(
proto.MESSAGE,
number=7,
oneof="query_type",
message=gd_query.GqlQuery,
)
class RunQueryResponse(proto.Message):
r"""The response for
[Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
Attributes:
batch (google.cloud.datastore_v1.types.QueryResultBatch):
A batch of query results (always present).
query (google.cloud.datastore_v1.types.Query):
The parsed form of the ``GqlQuery`` from the request, if it
was set.
"""
batch = proto.Field(
proto.MESSAGE,
number=1,
message=gd_query.QueryResultBatch,
)
query = proto.Field(
proto.MESSAGE,
number=2,
message=gd_query.Query,
)
class BeginTransactionRequest(proto.Message):
r"""The request for
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
transaction_options (google.cloud.datastore_v1.types.TransactionOptions):
Options for a new transaction.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
transaction_options = proto.Field(
proto.MESSAGE,
number=10,
message="TransactionOptions",
)
class BeginTransactionResponse(proto.Message):
r"""The response for
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
Attributes:
transaction (bytes):
The transaction identifier (always present).
"""
transaction = proto.Field(
proto.BYTES,
number=1,
)
class RollbackRequest(proto.Message):
r"""The request for
[Datastore.Rollback][google.datastore.v1.Datastore.Rollback].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
transaction (bytes):
Required. The transaction identifier, returned by a call to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
transaction = proto.Field(
proto.BYTES,
number=1,
)
class RollbackResponse(proto.Message):
r"""The response for
[Datastore.Rollback][google.datastore.v1.Datastore.Rollback]. (an
empty message).
"""
class CommitRequest(proto.Message):
r"""The request for
[Datastore.Commit][google.datastore.v1.Datastore.Commit].
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
mode (google.cloud.datastore_v1.types.CommitRequest.Mode):
The type of commit to perform. Defaults to
``TRANSACTIONAL``.
transaction (bytes):
The identifier of the transaction associated with the
commit. A transaction identifier is returned by a call to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
This field is a member of `oneof`_ ``transaction_selector``.
mutations (Sequence[google.cloud.datastore_v1.types.Mutation]):
The mutations to perform.
When mode is ``TRANSACTIONAL``, mutations affecting a single
entity are applied in order. The following sequences of
mutations affecting a single entity are not permitted in a
single ``Commit`` request:
- ``insert`` followed by ``insert``
- ``update`` followed by ``insert``
- ``upsert`` followed by ``insert``
- ``delete`` followed by ``update``
When mode is ``NON_TRANSACTIONAL``, no two mutations may
affect a single entity.
"""
class Mode(proto.Enum):
r"""The modes available for commits."""
MODE_UNSPECIFIED = 0
TRANSACTIONAL = 1
NON_TRANSACTIONAL = 2
project_id = proto.Field(
proto.STRING,
number=8,
)
mode = proto.Field(
proto.ENUM,
number=5,
enum=Mode,
)
transaction = proto.Field(
proto.BYTES,
number=1,
oneof="transaction_selector",
)
mutations = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="Mutation",
)
class CommitResponse(proto.Message):
r"""The response for
[Datastore.Commit][google.datastore.v1.Datastore.Commit].
Attributes:
mutation_results (Sequence[google.cloud.datastore_v1.types.MutationResult]):
The result of performing the mutations.
The i-th mutation result corresponds to the i-th
mutation in the request.
index_updates (int):
The number of index entries updated during
the commit, or zero if none were updated.
commit_time (google.protobuf.timestamp_pb2.Timestamp):
The transaction commit timestamp. Not set for
non-transactional commits.
"""
mutation_results = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="MutationResult",
)
index_updates = proto.Field(
proto.INT32,
number=4,
)
commit_time = proto.Field(
proto.MESSAGE,
number=8,
message=timestamp_pb2.Timestamp,
)
class AllocateIdsRequest(proto.Message):
r"""The request for
[Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
keys (Sequence[google.cloud.datastore_v1.types.Key]):
Required. A list of keys with incomplete key
paths for which to allocate IDs. No key may be
reserved/read-only.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
keys = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=entity.Key,
)
class AllocateIdsResponse(proto.Message):
r"""The response for
[Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
Attributes:
keys (Sequence[google.cloud.datastore_v1.types.Key]):
The keys specified in the request (in the
same order), each with its key path completed
with a newly allocated ID.
"""
keys = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=entity.Key,
)
class ReserveIdsRequest(proto.Message):
r"""The request for
[Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
database_id (str):
If not empty, the ID of the database against
which to make the request.
keys (Sequence[google.cloud.datastore_v1.types.Key]):
Required. A list of keys with complete key
paths whose numeric IDs should not be
auto-allocated.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
database_id = proto.Field(
proto.STRING,
number=9,
)
keys = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=entity.Key,
)
class ReserveIdsResponse(proto.Message):
r"""The response for
[Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds].
"""
class Mutation(proto.Message):
r"""A mutation to apply to an entity.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
insert (google.cloud.datastore_v1.types.Entity):
The entity to insert. The entity must not
already exist. The entity key's final path
element may be incomplete.
This field is a member of `oneof`_ ``operation``.
update (google.cloud.datastore_v1.types.Entity):
The entity to update. The entity must already
exist. Must have a complete key path.
This field is a member of `oneof`_ ``operation``.
upsert (google.cloud.datastore_v1.types.Entity):
The entity to upsert. The entity may or may
not already exist. The entity key's final path
element may be incomplete.
This field is a member of `oneof`_ ``operation``.
delete (google.cloud.datastore_v1.types.Key):
The key of the entity to delete. The entity
may or may not already exist. Must have a
complete key path and must not be
reserved/read-only.
This field is a member of `oneof`_ ``operation``.
base_version (int):
The version of the entity that this mutation
is being applied to. If this does not match the
current version on the server, the mutation
conflicts.
This field is a member of `oneof`_ ``conflict_detection_strategy``.
update_time (google.protobuf.timestamp_pb2.Timestamp):
The update time of the entity that this
mutation is being applied to. If this does not
match the current update time on the server, the
mutation conflicts.
This field is a member of `oneof`_ ``conflict_detection_strategy``.
"""
insert = proto.Field(
proto.MESSAGE,
number=4,
oneof="operation",
message=entity.Entity,
)
update = proto.Field(
proto.MESSAGE,
number=5,
oneof="operation",
message=entity.Entity,
)
upsert = proto.Field(
proto.MESSAGE,
number=6,
oneof="operation",
message=entity.Entity,
)
delete = proto.Field(
proto.MESSAGE,
number=7,
oneof="operation",
message=entity.Key,
)
base_version = proto.Field(
proto.INT64,
number=8,
oneof="conflict_detection_strategy",
)
update_time = proto.Field(
proto.MESSAGE,
number=11,
oneof="conflict_detection_strategy",
message=timestamp_pb2.Timestamp,
)
class MutationResult(proto.Message):
r"""The result of applying a mutation.
Attributes:
key (google.cloud.datastore_v1.types.Key):
The automatically allocated key.
Set only when the mutation allocated a key.
version (int):
The version of the entity on the server after
processing the mutation. If the mutation doesn't
change anything on the server, then the version
will be the version of the current entity or, if
no entity is present, a version that is strictly
greater than the version of any previous entity
and less than the version of any possible future
entity.
update_time (google.protobuf.timestamp_pb2.Timestamp):
The update time of the entity on the server
after processing the mutation. If the mutation
doesn't change anything on the server, then the
timestamp will be the update timestamp of the
current entity. This field will not be set after
a 'delete'.
conflict_detected (bool):
Whether a conflict was detected for this
mutation. Always false when a conflict detection
strategy field is not set in the mutation.
"""
key = proto.Field(
proto.MESSAGE,
number=3,
message=entity.Key,
)
version = proto.Field(
proto.INT64,
number=4,
)
update_time = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
conflict_detected = proto.Field(
proto.BOOL,
number=5,
)
class ReadOptions(proto.Message):
r"""The options shared by read requests.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
read_consistency (google.cloud.datastore_v1.types.ReadOptions.ReadConsistency):
The non-transactional read consistency to use. Cannot be set
to ``STRONG`` for global queries.
This field is a member of `oneof`_ ``consistency_type``.
transaction (bytes):
The identifier of the transaction in which to read. A
transaction identifier is returned by a call to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
This field is a member of `oneof`_ ``consistency_type``.
read_time (google.protobuf.timestamp_pb2.Timestamp):
Reads entities as they were at the given
time. This may not be older than 270 seconds.
This value is only supported for Cloud Firestore
in Datastore mode.
This field is a member of `oneof`_ ``consistency_type``.
"""
class ReadConsistency(proto.Enum):
r"""The possible values for read consistencies."""
READ_CONSISTENCY_UNSPECIFIED = 0
STRONG = 1
EVENTUAL = 2
read_consistency = proto.Field(
proto.ENUM,
number=1,
oneof="consistency_type",
enum=ReadConsistency,
)
transaction = proto.Field(
proto.BYTES,
number=2,
oneof="consistency_type",
)
read_time = proto.Field(
proto.MESSAGE,
number=4,
oneof="consistency_type",
message=timestamp_pb2.Timestamp,
)
class TransactionOptions(proto.Message):
r"""Options for beginning a new transaction.
Transactions can be created explicitly with calls to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]
or implicitly by setting
[ReadOptions.new_transaction][google.datastore.v1.ReadOptions.new_transaction]
in read requests.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
read_write (google.cloud.datastore_v1.types.TransactionOptions.ReadWrite):
The transaction should allow both reads and
writes.
This field is a member of `oneof`_ ``mode``.
read_only (google.cloud.datastore_v1.types.TransactionOptions.ReadOnly):
The transaction should only allow reads.
This field is a member of `oneof`_ ``mode``.
"""
class ReadWrite(proto.Message):
r"""Options specific to read / write transactions.
Attributes:
previous_transaction (bytes):
The transaction identifier of the transaction
being retried.
"""
previous_transaction = proto.Field(
proto.BYTES,
number=1,
)
class ReadOnly(proto.Message):
r"""Options specific to read-only transactions.
Attributes:
read_time (google.protobuf.timestamp_pb2.Timestamp):
Reads entities at the given time.
This may not be older than 60 seconds.
"""
read_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
read_write = proto.Field(
proto.MESSAGE,
number=1,
oneof="mode",
message=ReadWrite,
)
read_only = proto.Field(
proto.MESSAGE,
number=2,
oneof="mode",
message=ReadOnly,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.datastore_v1.types import entity
from google.cloud.datastore_v1.types import query as gd_query
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.datastore.v1",
manifest={
"LookupRequest",
"LookupResponse",
"RunQueryRequest",
"RunQueryResponse",
"BeginTransactionRequest",
"BeginTransactionResponse",
"RollbackRequest",
"RollbackResponse",
"CommitRequest",
"CommitResponse",
"AllocateIdsRequest",
"AllocateIdsResponse",
"ReserveIdsRequest",
"ReserveIdsResponse",
"Mutation",
"MutationResult",
"ReadOptions",
"TransactionOptions",
},
)
class LookupRequest(proto.Message):
r"""The request for
[Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
read_options (google.cloud.datastore_v1.types.ReadOptions):
The options for this lookup request.
keys (Sequence[google.cloud.datastore_v1.types.Key]):
Required. Keys of entities to look up.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
read_options = proto.Field(
proto.MESSAGE,
number=1,
message="ReadOptions",
)
keys = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=entity.Key,
)
class LookupResponse(proto.Message):
r"""The response for
[Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
Attributes:
found (Sequence[google.cloud.datastore_v1.types.EntityResult]):
Entities found as ``ResultType.FULL`` entities. The order of
results in this field is undefined and has no relation to
the order of the keys in the input.
missing (Sequence[google.cloud.datastore_v1.types.EntityResult]):
Entities not found as ``ResultType.KEY_ONLY`` entities. The
order of results in this field is undefined and has no
relation to the order of the keys in the input.
deferred (Sequence[google.cloud.datastore_v1.types.Key]):
A list of keys that were not looked up due to
resource constraints. The order of results in
this field is undefined and has no relation to
the order of the keys in the input.
read_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which these entities were read or
found missing.
"""
found = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gd_query.EntityResult,
)
missing = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=gd_query.EntityResult,
)
deferred = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=entity.Key,
)
read_time = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
class RunQueryRequest(proto.Message):
r"""The request for
[Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
partition_id (google.cloud.datastore_v1.types.PartitionId):
Entities are partitioned into subsets,
identified by a partition ID. Queries are scoped
to a single partition. This partition ID is
normalized with the standard default context
partition ID.
read_options (google.cloud.datastore_v1.types.ReadOptions):
The options for this query.
query (google.cloud.datastore_v1.types.Query):
The query to run.
This field is a member of `oneof`_ ``query_type``.
gql_query (google.cloud.datastore_v1.types.GqlQuery):
The GQL query to run.
This field is a member of `oneof`_ ``query_type``.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
partition_id = proto.Field(
proto.MESSAGE,
number=2,
message=entity.PartitionId,
)
read_options = proto.Field(
proto.MESSAGE,
number=1,
message="ReadOptions",
)
query = proto.Field(
proto.MESSAGE,
number=3,
oneof="query_type",
message=gd_query.Query,
)
gql_query = proto.Field(
proto.MESSAGE,
number=7,
oneof="query_type",
message=gd_query.GqlQuery,
)
class RunQueryResponse(proto.Message):
r"""The response for
[Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
Attributes:
batch (google.cloud.datastore_v1.types.QueryResultBatch):
A batch of query results (always present).
query (google.cloud.datastore_v1.types.Query):
The parsed form of the ``GqlQuery`` from the request, if it
was set.
"""
batch = proto.Field(
proto.MESSAGE,
number=1,
message=gd_query.QueryResultBatch,
)
query = proto.Field(
proto.MESSAGE,
number=2,
message=gd_query.Query,
)
class BeginTransactionRequest(proto.Message):
r"""The request for
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
transaction_options (google.cloud.datastore_v1.types.TransactionOptions):
Options for a new transaction.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
transaction_options = proto.Field(
proto.MESSAGE,
number=10,
message="TransactionOptions",
)
class BeginTransactionResponse(proto.Message):
r"""The response for
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
Attributes:
transaction (bytes):
The transaction identifier (always present).
"""
transaction = proto.Field(
proto.BYTES,
number=1,
)
class RollbackRequest(proto.Message):
r"""The request for
[Datastore.Rollback][google.datastore.v1.Datastore.Rollback].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
transaction (bytes):
Required. The transaction identifier, returned by a call to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
transaction = proto.Field(
proto.BYTES,
number=1,
)
class RollbackResponse(proto.Message):
r"""The response for
[Datastore.Rollback][google.datastore.v1.Datastore.Rollback]. (an
empty message).
"""
class CommitRequest(proto.Message):
r"""The request for
[Datastore.Commit][google.datastore.v1.Datastore.Commit].
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
mode (google.cloud.datastore_v1.types.CommitRequest.Mode):
The type of commit to perform. Defaults to
``TRANSACTIONAL``.
transaction (bytes):
The identifier of the transaction associated with the
commit. A transaction identifier is returned by a call to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
This field is a member of `oneof`_ ``transaction_selector``.
mutations (Sequence[google.cloud.datastore_v1.types.Mutation]):
The mutations to perform.
When mode is ``TRANSACTIONAL``, mutations affecting a single
entity are applied in order. The following sequences of
mutations affecting a single entity are not permitted in a
single ``Commit`` request:
- ``insert`` followed by ``insert``
- ``update`` followed by ``insert``
- ``upsert`` followed by ``insert``
- ``delete`` followed by ``update``
When mode is ``NON_TRANSACTIONAL``, no two mutations may
affect a single entity.
"""
class Mode(proto.Enum):
r"""The modes available for commits."""
MODE_UNSPECIFIED = 0
TRANSACTIONAL = 1
NON_TRANSACTIONAL = 2
project_id = proto.Field(
proto.STRING,
number=8,
)
mode = proto.Field(
proto.ENUM,
number=5,
enum=Mode,
)
transaction = proto.Field(
proto.BYTES,
number=1,
oneof="transaction_selector",
)
mutations = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="Mutation",
)
class CommitResponse(proto.Message):
r"""The response for
[Datastore.Commit][google.datastore.v1.Datastore.Commit].
Attributes:
mutation_results (Sequence[google.cloud.datastore_v1.types.MutationResult]):
The result of performing the mutations.
The i-th mutation result corresponds to the i-th
mutation in the request.
index_updates (int):
The number of index entries updated during
the commit, or zero if none were updated.
commit_time (google.protobuf.timestamp_pb2.Timestamp):
The transaction commit timestamp. Not set for
non-transactional commits.
"""
mutation_results = proto.RepeatedField(
proto.MESSAGE,
number=3,
message="MutationResult",
)
index_updates = proto.Field(
proto.INT32,
number=4,
)
commit_time = proto.Field(
proto.MESSAGE,
number=8,
message=timestamp_pb2.Timestamp,
)
class AllocateIdsRequest(proto.Message):
r"""The request for
[Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
keys (Sequence[google.cloud.datastore_v1.types.Key]):
Required. A list of keys with incomplete key
paths for which to allocate IDs. No key may be
reserved/read-only.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
keys = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=entity.Key,
)
class AllocateIdsResponse(proto.Message):
r"""The response for
[Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
Attributes:
keys (Sequence[google.cloud.datastore_v1.types.Key]):
The keys specified in the request (in the
same order), each with its key path completed
with a newly allocated ID.
"""
keys = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=entity.Key,
)
class ReserveIdsRequest(proto.Message):
r"""The request for
[Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds].
Attributes:
project_id (str):
Required. The ID of the project against which
to make the request.
database_id (str):
If not empty, the ID of the database against
which to make the request.
keys (Sequence[google.cloud.datastore_v1.types.Key]):
Required. A list of keys with complete key
paths whose numeric IDs should not be
auto-allocated.
"""
project_id = proto.Field(
proto.STRING,
number=8,
)
database_id = proto.Field(
proto.STRING,
number=9,
)
keys = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=entity.Key,
)
class ReserveIdsResponse(proto.Message):
r"""The response for
[Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds].
"""
class Mutation(proto.Message):
r"""A mutation to apply to an entity.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
insert (google.cloud.datastore_v1.types.Entity):
The entity to insert. The entity must not
already exist. The entity key's final path
element may be incomplete.
This field is a member of `oneof`_ ``operation``.
update (google.cloud.datastore_v1.types.Entity):
The entity to update. The entity must already
exist. Must have a complete key path.
This field is a member of `oneof`_ ``operation``.
upsert (google.cloud.datastore_v1.types.Entity):
The entity to upsert. The entity may or may
not already exist. The entity key's final path
element may be incomplete.
This field is a member of `oneof`_ ``operation``.
delete (google.cloud.datastore_v1.types.Key):
The key of the entity to delete. The entity
may or may not already exist. Must have a
complete key path and must not be
reserved/read-only.
This field is a member of `oneof`_ ``operation``.
base_version (int):
The version of the entity that this mutation
is being applied to. If this does not match the
current version on the server, the mutation
conflicts.
This field is a member of `oneof`_ ``conflict_detection_strategy``.
update_time (google.protobuf.timestamp_pb2.Timestamp):
The update time of the entity that this
mutation is being applied to. If this does not
match the current update time on the server, the
mutation conflicts.
This field is a member of `oneof`_ ``conflict_detection_strategy``.
"""
insert = proto.Field(
proto.MESSAGE,
number=4,
oneof="operation",
message=entity.Entity,
)
update = proto.Field(
proto.MESSAGE,
number=5,
oneof="operation",
message=entity.Entity,
)
upsert = proto.Field(
proto.MESSAGE,
number=6,
oneof="operation",
message=entity.Entity,
)
delete = proto.Field(
proto.MESSAGE,
number=7,
oneof="operation",
message=entity.Key,
)
base_version = proto.Field(
proto.INT64,
number=8,
oneof="conflict_detection_strategy",
)
update_time = proto.Field(
proto.MESSAGE,
number=11,
oneof="conflict_detection_strategy",
message=timestamp_pb2.Timestamp,
)
class MutationResult(proto.Message):
r"""The result of applying a mutation.
Attributes:
key (google.cloud.datastore_v1.types.Key):
The automatically allocated key.
Set only when the mutation allocated a key.
version (int):
The version of the entity on the server after
processing the mutation. If the mutation doesn't
change anything on the server, then the version
will be the version of the current entity or, if
no entity is present, a version that is strictly
greater than the version of any previous entity
and less than the version of any possible future
entity.
update_time (google.protobuf.timestamp_pb2.Timestamp):
The update time of the entity on the server
after processing the mutation. If the mutation
doesn't change anything on the server, then the
timestamp will be the update timestamp of the
current entity. This field will not be set after
a 'delete'.
conflict_detected (bool):
Whether a conflict was detected for this
mutation. Always false when a conflict detection
strategy field is not set in the mutation.
"""
key = proto.Field(
proto.MESSAGE,
number=3,
message=entity.Key,
)
version = proto.Field(
proto.INT64,
number=4,
)
update_time = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
conflict_detected = proto.Field(
proto.BOOL,
number=5,
)
class ReadOptions(proto.Message):
r"""The options shared by read requests.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
read_consistency (google.cloud.datastore_v1.types.ReadOptions.ReadConsistency):
The non-transactional read consistency to use. Cannot be set
to ``STRONG`` for global queries.
This field is a member of `oneof`_ ``consistency_type``.
transaction (bytes):
The identifier of the transaction in which to read. A
transaction identifier is returned by a call to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
This field is a member of `oneof`_ ``consistency_type``.
read_time (google.protobuf.timestamp_pb2.Timestamp):
Reads entities as they were at the given
time. This may not be older than 270 seconds.
This value is only supported for Cloud Firestore
in Datastore mode.
This field is a member of `oneof`_ ``consistency_type``.
"""
class ReadConsistency(proto.Enum):
r"""The possible values for read consistencies."""
READ_CONSISTENCY_UNSPECIFIED = 0
STRONG = 1
EVENTUAL = 2
read_consistency = proto.Field(
proto.ENUM,
number=1,
oneof="consistency_type",
enum=ReadConsistency,
)
transaction = proto.Field(
proto.BYTES,
number=2,
oneof="consistency_type",
)
read_time = proto.Field(
proto.MESSAGE,
number=4,
oneof="consistency_type",
message=timestamp_pb2.Timestamp,
)
class TransactionOptions(proto.Message):
r"""Options for beginning a new transaction.
Transactions can be created explicitly with calls to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]
or implicitly by setting
[ReadOptions.new_transaction][google.datastore.v1.ReadOptions.new_transaction]
in read requests.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
read_write (google.cloud.datastore_v1.types.TransactionOptions.ReadWrite):
The transaction should allow both reads and
writes.
This field is a member of `oneof`_ ``mode``.
read_only (google.cloud.datastore_v1.types.TransactionOptions.ReadOnly):
The transaction should only allow reads.
This field is a member of `oneof`_ ``mode``.
"""
class ReadWrite(proto.Message):
r"""Options specific to read / write transactions.
Attributes:
previous_transaction (bytes):
The transaction identifier of the transaction
being retried.
"""
previous_transaction = proto.Field(
proto.BYTES,
number=1,
)
class ReadOnly(proto.Message):
r"""Options specific to read-only transactions.
Attributes:
read_time (google.protobuf.timestamp_pb2.Timestamp):
Reads entities at the given time.
This may not be older than 60 seconds.
"""
read_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
read_write = proto.Field(
proto.MESSAGE,
number=1,
oneof="mode",
message=ReadWrite,
)
read_only = proto.Field(
proto.MESSAGE,
number=2,
oneof="mode",
message=ReadOnly,
)
__all__ = tuple(sorted(__protobuf__.manifest)) | en | 0.790217 | # -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # type: ignore # type: ignore The request for [Datastore.Lookup][google.datastore.v1.Datastore.Lookup]. Attributes: project_id (str): Required. The ID of the project against which to make the request. read_options (google.cloud.datastore_v1.types.ReadOptions): The options for this lookup request. keys (Sequence[google.cloud.datastore_v1.types.Key]): Required. Keys of entities to look up. The response for [Datastore.Lookup][google.datastore.v1.Datastore.Lookup]. Attributes: found (Sequence[google.cloud.datastore_v1.types.EntityResult]): Entities found as ``ResultType.FULL`` entities. The order of results in this field is undefined and has no relation to the order of the keys in the input. missing (Sequence[google.cloud.datastore_v1.types.EntityResult]): Entities not found as ``ResultType.KEY_ONLY`` entities. The order of results in this field is undefined and has no relation to the order of the keys in the input. deferred (Sequence[google.cloud.datastore_v1.types.Key]): A list of keys that were not looked up due to resource constraints. The order of results in this field is undefined and has no relation to the order of the keys in the input. read_time (google.protobuf.timestamp_pb2.Timestamp): The time at which these entities were read or found missing. The request for [Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery]. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: project_id (str): Required. The ID of the project against which to make the request. partition_id (google.cloud.datastore_v1.types.PartitionId): Entities are partitioned into subsets, identified by a partition ID. Queries are scoped to a single partition. This partition ID is normalized with the standard default context partition ID. read_options (google.cloud.datastore_v1.types.ReadOptions): The options for this query. query (google.cloud.datastore_v1.types.Query): The query to run. This field is a member of `oneof`_ ``query_type``. gql_query (google.cloud.datastore_v1.types.GqlQuery): The GQL query to run. This field is a member of `oneof`_ ``query_type``. The response for [Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery]. Attributes: batch (google.cloud.datastore_v1.types.QueryResultBatch): A batch of query results (always present). query (google.cloud.datastore_v1.types.Query): The parsed form of the ``GqlQuery`` from the request, if it was set. The request for [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. Attributes: project_id (str): Required. The ID of the project against which to make the request. 
transaction_options (google.cloud.datastore_v1.types.TransactionOptions): Options for a new transaction. The response for [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. Attributes: transaction (bytes): The transaction identifier (always present). The request for [Datastore.Rollback][google.datastore.v1.Datastore.Rollback]. Attributes: project_id (str): Required. The ID of the project against which to make the request. transaction (bytes): Required. The transaction identifier, returned by a call to [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. The response for [Datastore.Rollback][google.datastore.v1.Datastore.Rollback]. (an empty message). The request for [Datastore.Commit][google.datastore.v1.Datastore.Commit]. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: project_id (str): Required. The ID of the project against which to make the request. mode (google.cloud.datastore_v1.types.CommitRequest.Mode): The type of commit to perform. Defaults to ``TRANSACTIONAL``. transaction (bytes): The identifier of the transaction associated with the commit. A transaction identifier is returned by a call to [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. This field is a member of `oneof`_ ``transaction_selector``. mutations (Sequence[google.cloud.datastore_v1.types.Mutation]): The mutations to perform. When mode is ``TRANSACTIONAL``, mutations affecting a single entity are applied in order. The following sequences of mutations affecting a single entity are not permitted in a single ``Commit`` request: - ``insert`` followed by ``insert`` - ``update`` followed by ``insert`` - ``upsert`` followed by ``insert`` - ``delete`` followed by ``update`` When mode is ``NON_TRANSACTIONAL``, no two mutations may affect a single entity. The modes available for commits. The response for [Datastore.Commit][google.datastore.v1.Datastore.Commit]. Attributes: mutation_results (Sequence[google.cloud.datastore_v1.types.MutationResult]): The result of performing the mutations. The i-th mutation result corresponds to the i-th mutation in the request. index_updates (int): The number of index entries updated during the commit, or zero if none were updated. commit_time (google.protobuf.timestamp_pb2.Timestamp): The transaction commit timestamp. Not set for non-transactional commits. The request for [Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds]. Attributes: project_id (str): Required. The ID of the project against which to make the request. keys (Sequence[google.cloud.datastore_v1.types.Key]): Required. A list of keys with incomplete key paths for which to allocate IDs. No key may be reserved/read-only. The response for [Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds]. Attributes: keys (Sequence[google.cloud.datastore_v1.types.Key]): The keys specified in the request (in the same order), each with its key path completed with a newly allocated ID. The request for [Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds]. Attributes: project_id (str): Required. The ID of the project against which to make the request. database_id (str): If not empty, the ID of the database against which to make the request. keys (Sequence[google.cloud.datastore_v1.types.Key]): Required. A list of keys with complete key paths whose numeric IDs should not be auto-allocated. 
The response for [Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds]. A mutation to apply to an entity. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: insert (google.cloud.datastore_v1.types.Entity): The entity to insert. The entity must not already exist. The entity key's final path element may be incomplete. This field is a member of `oneof`_ ``operation``. update (google.cloud.datastore_v1.types.Entity): The entity to update. The entity must already exist. Must have a complete key path. This field is a member of `oneof`_ ``operation``. upsert (google.cloud.datastore_v1.types.Entity): The entity to upsert. The entity may or may not already exist. The entity key's final path element may be incomplete. This field is a member of `oneof`_ ``operation``. delete (google.cloud.datastore_v1.types.Key): The key of the entity to delete. The entity may or may not already exist. Must have a complete key path and must not be reserved/read-only. This field is a member of `oneof`_ ``operation``. base_version (int): The version of the entity that this mutation is being applied to. If this does not match the current version on the server, the mutation conflicts. This field is a member of `oneof`_ ``conflict_detection_strategy``. update_time (google.protobuf.timestamp_pb2.Timestamp): The update time of the entity that this mutation is being applied to. If this does not match the current update time on the server, the mutation conflicts. This field is a member of `oneof`_ ``conflict_detection_strategy``. The result of applying a mutation. Attributes: key (google.cloud.datastore_v1.types.Key): The automatically allocated key. Set only when the mutation allocated a key. version (int): The version of the entity on the server after processing the mutation. If the mutation doesn't change anything on the server, then the version will be the version of the current entity or, if no entity is present, a version that is strictly greater than the version of any previous entity and less than the version of any possible future entity. update_time (google.protobuf.timestamp_pb2.Timestamp): The update time of the entity on the server after processing the mutation. If the mutation doesn't change anything on the server, then the timestamp will be the update timestamp of the current entity. This field will not be set after a 'delete'. conflict_detected (bool): Whether a conflict was detected for this mutation. Always false when a conflict detection strategy field is not set in the mutation. The options shared by read requests. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: read_consistency (google.cloud.datastore_v1.types.ReadOptions.ReadConsistency): The non-transactional read consistency to use. Cannot be set to ``STRONG`` for global queries. This field is a member of `oneof`_ ``consistency_type``. transaction (bytes): The identifier of the transaction in which to read. 
A transaction identifier is returned by a call to [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. This field is a member of `oneof`_ ``consistency_type``. read_time (google.protobuf.timestamp_pb2.Timestamp): Reads entities as they were at the given time. This may not be older than 270 seconds. This value is only supported for Cloud Firestore in Datastore mode. This field is a member of `oneof`_ ``consistency_type``. The possible values for read consistencies. Options for beginning a new transaction. Transactions can be created explicitly with calls to [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction] or implicitly by setting [ReadOptions.new_transaction][google.datastore.v1.ReadOptions.new_transaction] in read requests. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: read_write (google.cloud.datastore_v1.types.TransactionOptions.ReadWrite): The transaction should allow both reads and writes. This field is a member of `oneof`_ ``mode``. read_only (google.cloud.datastore_v1.types.TransactionOptions.ReadOnly): The transaction should only allow reads. This field is a member of `oneof`_ ``mode``. Options specific to read / write transactions. Attributes: previous_transaction (bytes): The transaction identifier of the transaction being retried. Options specific to read-only transactions. Attributes: read_time (google.protobuf.timestamp_pb2.Timestamp): Reads entities at the given time. This may not be older than 60 seconds. | 1.710338 | 2 |
initialisers/database.py | PeriGK/MIL | 0 | 6632118 | <reponame>PeriGK/MIL
import logging
def initialise_database_connection(database_conf):
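    # Currently only logs the attempt; actual connection setup would use database_conf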
logging.info('Initialising database') | import logging
def initialise_database_connection(database_conf):
logging.info('Initialising database') | none | 1 | 1.646313 | 2 |
|
tests/dag/test_dagclients.py | MarcoJHB/ploomber | 2,141 | 6632119 | from pathlib import Path
from unittest.mock import Mock
import pytest
from ploomber.dag.dagclients import DAGClients
from ploomber.tasks import SQLScript
from ploomber.products import File
from ploomber.util.dotted_path import DottedPath
from ploomber.clients import LocalStorageClient
def test_error_if_setting_invalid_key():
clients = DAGClients()
with pytest.raises(ValueError) as excinfo:
clients[object] = Mock()
assert ('DAG client keys must be Tasks or '
'Products, value <class \'object\'> is not') == str(excinfo.value)
def test_iter():
assert list(DAGClients()) == []
def test_setitem_and_getitem_with_str():
clients = DAGClients()
mock = Mock()
clients['SQLScript'] = mock
assert clients[SQLScript] is mock
assert clients['SQLScript'] is mock
def test_error_setitem_invalid_str():
clients = DAGClients()
mock = Mock()
with pytest.raises(ValueError) as excinfo:
clients['invalid_name'] = mock
expected = (f"Could not set DAG-level client {mock!r}. 'invalid_name' "
"is not a valid Task or Product class name")
assert str(excinfo.value) == expected
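# Misspelled Task/Product class names should raise with a "Did you mean ...?" suggestion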
@pytest.mark.parametrize('typo, expected', [
['sqlscript', 'SQLScript'],
['SQLSCRIPT', 'SQLScript'],
['sql_script', 'SQLScript'],
['sql-script', 'SQLScript'],
['sql script', 'SQLScript'],
['file', 'File'],
])
def test_error_setitem_invalid_str_with_typo(typo, expected):
clients = DAGClients()
mock = Mock()
with pytest.raises(ValueError) as excinfo:
clients[typo] = mock
assert f"Did you mean {expected!r}?" in str(excinfo.value)
@pytest.mark.parametrize('typo, expected, class_', [
['sqlscript', 'SQLScript', SQLScript],
['SQLSCRIPT', 'SQLScript', SQLScript],
['sql_script', 'SQLScript', SQLScript],
['sql-script', 'SQLScript', SQLScript],
['sql script', 'SQLScript', SQLScript],
['file', 'File', File],
])
def test_error_getitem_invalid_str_with_typo(typo, expected, class_):
clients = DAGClients()
mock = Mock()
clients[class_] = mock
with pytest.raises(KeyError) as excinfo:
clients[typo]
expected = f"{typo!r}. Did you mean {expected!r}?"
assert expected in str(excinfo.value)
def test_error_does_not_suggest_if_key_does_not_exist():
clients = DAGClients()
with pytest.raises(KeyError) as excinfo:
clients['sqlscript']
assert "Did you mean 'SQLScript'?" not in str(excinfo.value)
def test_repr():
clients = DAGClients()
clients[SQLScript] = 1
expected = "DAGClients({<class 'ploomber.tasks.sql.SQLScript'>: 1})"
assert repr(clients) == expected
def test_initializes_dotted_path_spec(tmp_directory, tmp_imports):
Path('my_testing_clients.py').write_text("""
from ploomber.clients import LocalStorageClient
def get_client():
return LocalStorageClient('backup', path_to_project_root='.')
""")
clients = DAGClients()
# this happens when using the spec API and lazy load is turned on
clients[File] = DottedPath('my_testing_clients.get_client')
client = clients[File]
assert isinstance(client, LocalStorageClient)
assert clients[File] is client
| from pathlib import Path
from unittest.mock import Mock
import pytest
from ploomber.dag.dagclients import DAGClients
from ploomber.tasks import SQLScript
from ploomber.products import File
from ploomber.util.dotted_path import DottedPath
from ploomber.clients import LocalStorageClient
def test_error_if_setting_invalid_key():
clients = DAGClients()
with pytest.raises(ValueError) as excinfo:
clients[object] = Mock()
assert ('DAG client keys must be Tasks or '
'Products, value <class \'object\'> is not') == str(excinfo.value)
def test_iter():
assert list(DAGClients()) == []
def test_setitem_and_getitem_with_str():
clients = DAGClients()
mock = Mock()
clients['SQLScript'] = mock
assert clients[SQLScript] is mock
assert clients['SQLScript'] is mock
def test_error_setitem_invalid_str():
clients = DAGClients()
mock = Mock()
with pytest.raises(ValueError) as excinfo:
clients['invalid_name'] = mock
expected = (f"Could not set DAG-level client {mock!r}. 'invalid_name' "
"is not a valid Task or Product class name")
assert str(excinfo.value) == expected
@pytest.mark.parametrize('typo, expected', [
['sqlscript', 'SQLScript'],
['SQLSCRIPT', 'SQLScript'],
['sql_script', 'SQLScript'],
['sql-script', 'SQLScript'],
['sql script', 'SQLScript'],
['file', 'File'],
])
def test_error_setitem_invalid_str_with_typo(typo, expected):
clients = DAGClients()
mock = Mock()
with pytest.raises(ValueError) as excinfo:
clients[typo] = mock
assert f"Did you mean {expected!r}?" in str(excinfo.value)
@pytest.mark.parametrize('typo, expected, class_', [
['sqlscript', 'SQLScript', SQLScript],
['SQLSCRIPT', 'SQLScript', SQLScript],
['sql_script', 'SQLScript', SQLScript],
['sql-script', 'SQLScript', SQLScript],
['sql script', 'SQLScript', SQLScript],
['file', 'File', File],
])
def test_error_getitem_invalid_str_with_typo(typo, expected, class_):
clients = DAGClients()
mock = Mock()
clients[class_] = mock
with pytest.raises(KeyError) as excinfo:
clients[typo]
expected = f"{typo!r}. Did you mean {expected!r}?"
assert expected in str(excinfo.value)
def test_error_does_not_suggest_if_key_does_not_exist():
clients = DAGClients()
with pytest.raises(KeyError) as excinfo:
clients['sqlscript']
assert "Did you mean 'SQLScript'?" not in str(excinfo.value)
def test_repr():
clients = DAGClients()
clients[SQLScript] = 1
expected = "DAGClients({<class 'ploomber.tasks.sql.SQLScript'>: 1})"
assert repr(clients) == expected
def test_initializes_dotted_path_spec(tmp_directory, tmp_imports):
Path('my_testing_clients.py').write_text("""
from ploomber.clients import LocalStorageClient
def get_client():
return LocalStorageClient('backup', path_to_project_root='.')
""")
clients = DAGClients()
# this happens when using the spec API and lazy load is turned on
clients[File] = DottedPath('my_testing_clients.get_client')
client = clients[File]
assert isinstance(client, LocalStorageClient)
assert clients[File] is client
| en | 0.627441 | from ploomber.clients import LocalStorageClient def get_client(): return LocalStorageClient('backup', path_to_project_root='.') # this happens when using the spec API and lazy load is turned on | 2.450317 | 2 |
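Two behaviours the ploomber tests above rely on are worth seeing in isolation: DAGClients accepts either a Task/Product class or its exact class name as a key, and a DottedPath value is resolved to a concrete client object the first time it is read. A minimal sketch, assuming only the imports already shown in the record:

from ploomber.dag.dagclients import DAGClients
from ploomber.tasks import SQLScript

clients = DAGClients()
client = object()                     # stand-in for a real DB client
clients['SQLScript'] = client         # string key is resolved to the SQLScript class
assert clients[SQLScript] is client
assert clients['SQLScript'] is client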
Jobindex_webscrape.py | Yousef0353/Data-Science-Projects | 0 | 6632120 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 18:54:45 2022
@author: balas
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
def extract(location,tag, page):
#Using User Agent,sometimes you will find that the webserver blocks certain user agents.
#This is mostly because it identifies the origin as a bot and certain websites don't allow bot crawlers or scrapers.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36"}
#Manipulating the jobindex URL
url = f"https://www.jobindex.dk/jobsoegning/{location}?page={page}&q={tag}"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content.decode("utf-8"), "html.parser")
return soup
joblist = []
def transform(soup):
#This is the div/class for every single jobpost
divs = soup.find_all("div", class_="jobsearch-result")
for item in divs:
#Extracting all the tags and information
title = item.find_all("b")[0].text.strip()
company = item.find_all("b")[1].text.strip()
published_date = item.find("time").text.strip()
summary = item.find_all("p")[1].text.strip()
job_location = item.find_all("p")[0].text.strip()
job_url = item.select_one('[data-click*="u="]:has(> b)')['href']
#Creating a dictionary
job = {
"title" : title,
"company" : company,
"published_date" : published_date,
"summary" : summary,
"job_location" : job_location,
"Job_url" : job_url
}
joblist.append(job)
return
#keywords1 = input("Hvor søger du?: ")
keywords2 = input("Hvad søger du?: ")
område = ["storkoebenhavn", "nordsjaelland", "region-sjaelland"]
print("Vælg det ønsket jobområde: ")
x = 0
while x < len(område):
print("Mulighed: ",x+1, område[x])
x+=1
keywords1 = int(input("Vælg det ønsket nummer: "))
print("Du har valgt ", område[keywords1-1])
if keywords1 == int("1"):
keywords1 = "storkoebenhavn"
elif keywords1 == int("2"):
keywords1 = "nordsjaelland"
elif keywords1 == int("3"):
keywords1 = "region-sjaelland"
else:
print("område ikke på liste")
#Applying function
for x in range(1,10):
c = extract(keywords1, keywords2, x)
transform(c)
#Converting list to dataframe
df = pd.DataFrame(joblist)
df.to_csv('Jobpost_ '+str(keywords2)+'.csv', index=False, encoding='utf-8-sig')
print("Finished")
| # -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 18:54:45 2022
@author: balas
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
def extract(location,tag, page):
#Using User Agent,sometimes you will find that the webserver blocks certain user agents.
#This is mostly because it identifies the origin as a bot and certain websites don't allow bot crawlers or scrapers.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36"}
#Manipulating the jobindex URL
url = f"https://www.jobindex.dk/jobsoegning/{location}?page={page}&q={tag}"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content.decode("utf-8"), "html.parser")
return soup
joblist = []
def transform(soup):
#This is the div/class for every single jobpost
divs = soup.find_all("div", class_="jobsearch-result")
for item in divs:
#Extracting all the tags and information
title = item.find_all("b")[0].text.strip()
company = item.find_all("b")[1].text.strip()
published_date = item.find("time").text.strip()
summary = item.find_all("p")[1].text.strip()
job_location = item.find_all("p")[0].text.strip()
job_url = item.select_one('[data-click*="u="]:has(> b)')['href']
#Creating a dictionary
job = {
"title" : title,
"company" : company,
"published_date" : published_date,
"summary" : summary,
"job_location" : job_location,
"Job_url" : job_url
}
joblist.append(job)
return
#keywords1 = input("Hvor søger du?: ")
keywords2 = input("Hvad søger du?: ")
område = ["storkoebenhavn", "nordsjaelland", "region-sjaelland"]
print("Vælg det ønsket jobområde: ")
x = 0
while x < len(område):
print("Mulighed: ",x+1, område[x])
x+=1
keywords1 = int(input("Vælg det ønsket nummer: "))
print("Du har valgt ", område[keywords1-1])
if keywords1 == int("1"):
keywords1 = "storkoebenhavn"
elif keywords1 == int("2"):
keywords1 = "nordsjaelland"
elif keywords1 == int("3"):
keywords1 = "region-sjaelland"
else:
print("område ikke på liste")
#Applying function
for x in range(1,10):
c = extract(keywords1, keywords2, x)
transform(c)
#Converting list to dataframe
df = pd.DataFrame(joblist)
df.to_csv('Jobpost_ '+str(keywords2)+'.csv', index=False, encoding='utf-8-sig')
print("Finished")
 | en | 0.730492 | # -*- coding: utf-8 -*- Created on Fri Mar 25 18:54:45 2022 @author: balas #Using User Agent,sometimes you will find that the webserver blocks certain user agents. #This is mostly because it identifies the origin as a bot and certain websites don't allow bot crawlers or scrapers. #Manipulating the jobindex URL #This is the div/class for every single jobpost #Extracting all the tags and information #Creating a dictionary #keywords1 = input("Hvor søger du?: ") #Applying function #Converting list to dataframe | 3.132297 | 3
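In requests.get the second positional argument is params, not headers, so the header dict in the scraper above must be passed by keyword for the custom User-Agent to reach the server, and the page counter has to be forwarded for the pagination loop to do anything. A minimal sketch of the same fetch with both details explicit (function name and timeout value are illustrative):

import requests
from bs4 import BeautifulSoup

HEADERS = {"User-Agent": "Mozilla/5.0"}  # same idea as the scraper's header dict

def fetch_page(location, tag, page):
    url = f"https://www.jobindex.dk/jobsoegning/{location}"
    # headers= must be a keyword argument; passed positionally it would be sent as query params
    r = requests.get(url, params={"page": page, "q": tag}, headers=HEADERS, timeout=10)
    r.raise_for_status()
    return BeautifulSoup(r.text, "html.parser")

# soups = [fetch_page("storkoebenhavn", "python", p) for p in range(1, 10)]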
src/grokcore/component/tests/event/provideHandler.py | zopefoundation/grokcore.component | 1 | 6632121 | <filename>src/grokcore/component/tests/event/provideHandler.py<gh_stars>1-10
"""
When you use the @grokcore.component.subscribe decorator, you can also
use zope.component.provideHandler to register the subscriber. This
can be useful for unittests where you may not want to grok everything
in a module but just enable certain components.
>>> from zope.interface.interfaces import ObjectEvent
>>> from zope.component import provideHandler
>>> provideHandler(mammothAdded)
>>> manfred = Mammoth('Manfred')
>>> import zope.event
>>> zope.event.notify(ObjectEvent(manfred))
>>> mammoths
['Manfred']
"""
import grokcore.component as grok
from zope.interface.interfaces import IObjectEvent
class Mammoth(object):
def __init__(self, name):
self.name = name
mammoths = []
@grok.subscribe(Mammoth, IObjectEvent)
def mammothAdded(mammoth, event):
mammoths.append(mammoth.name)
| <filename>src/grokcore/component/tests/event/provideHandler.py<gh_stars>1-10
"""
When you use the @grokcore.component.subscribe decorator, you can also
use zope.component.provideHandler to register the subscriber. This
can be useful for unittests where you may not want to grok everything
in a module but just enable certain components.
>>> from zope.interface.interfaces import ObjectEvent
>>> from zope.component import provideHandler
>>> provideHandler(mammothAdded)
>>> manfred = Mammoth('Manfred')
>>> import zope.event
>>> zope.event.notify(ObjectEvent(manfred))
>>> mammoths
['Manfred']
"""
import grokcore.component as grok
from zope.interface.interfaces import IObjectEvent
class Mammoth(object):
def __init__(self, name):
self.name = name
mammoths = []
@grok.subscribe(Mammoth, IObjectEvent)
def mammothAdded(mammoth, event):
mammoths.append(mammoth.name)
| en | 0.591073 | When you use the @grokcore.component.subscribe decorator, you can also use zope.component.provideHandler to register the subscriber. This can be useful for unittests where you may not want to grok everything in a module but just enable certain components. >>> from zope.interface.interfaces import ObjectEvent >>> from zope.component import provideHandler >>> provideHandler(mammothAdded) >>> manfred = Mammoth('Manfred') >>> import zope.event >>> zope.event.notify(ObjectEvent(manfred)) >>> mammoths ['Manfred'] | 2.071826 | 2 |
propagators/math_utils.py | mikelytaev/wave-propagation | 15 | 6632122 |
class DeltaFunction:
def __init__(self, x_c):
self.x_c = x_c |
class DeltaFunction:
def __init__(self, x_c):
self.x_c = x_c | none | 1 | 1.975258 | 2 |
scripts_gym/script2.py | lbaiao/sys-simulator-2 | 1 | 6632123 | <gh_stars>1-10
from shutil import copyfile
import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter
from sys_simulator.general.ou_noise import OUNoise
from sys_simulator.ddpg.agent import Agent
import torch
from sys_simulator.ddpg.framework import Framework
import gym
from time import time
import sys_simulator.general as gen
ALGO_NAME = 'ddpg'
# MAX_NUM_EPISODES = 12000
# STEPS_PER_EPISODE = 500
# REPLAY_INITIAL = 10000
MAX_STEPS = 12000
STEPS_PER_EPISODE = 500
REPLAY_INITIAL = int(0E3)
EVAL_NUM_EPISODES = 10
REPLAY_MEMORY_SIZE = int(1E6)
ACTOR_LEARNING_RATE = 1E-4
CRITIC_LEARNING_RATE = 1E-3
HIDDEN_SIZE = 256
N_HIDDEN_LAYERS = 2
BATCH_SIZE = 128
GAMMA = .99
SOFT_TAU = 1E-2
ALPHA = .6
BETA = .4
EXPLORATION = 'ou'
REPLAY_MEMORY_TYPE = 'standard'
PRIO_BETA_ITS = int(.8*(MAX_STEPS - REPLAY_INITIAL))
EVAL_EVERY = int(MAX_STEPS / 20)
OU_DECAY_PERIOD = 100000
OU_MU = 0.0
OU_THETA = .15
OU_MAX_SIGMA = .3
OU_MIN_SIGMA = .3
torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# env = NormalizedActions(gym.make('Pendulum-v0'))
env = gym.make('Pendulum-v0')
state_size = env.observation_space.shape[0]
action_size = env.action_space.shape[0]
a_min = env.action_space.low
a_max = env.action_space.high
framework = Framework(
REPLAY_MEMORY_TYPE,
REPLAY_MEMORY_SIZE,
REPLAY_INITIAL,
state_size,
action_size,
HIDDEN_SIZE,
N_HIDDEN_LAYERS,
ACTOR_LEARNING_RATE,
CRITIC_LEARNING_RATE,
BATCH_SIZE,
GAMMA,
SOFT_TAU,
torch_device,
alpha=ALPHA,
beta=BETA,
beta_its=PRIO_BETA_ITS
)
ou_noise = OUNoise(
env.action_space,
OU_MU, OU_THETA,
OU_MAX_SIGMA,
OU_MIN_SIGMA,
OU_DECAY_PERIOD
)
agent = Agent(a_min, a_max, EXPLORATION, torch_device)
def print_stuff(step: int, now: float):
if REPLAY_MEMORY_TYPE == 'prioritized':
out = 'Training. ' + \
f'Step: {step}/{MAX_STEPS-1}. ' + \
f'Prio_Beta: {framework.replay_memory._beta}. ' + \
f'Elapsed time: {now} minutes.'
else:
out = 'Training. ' + \
f'Step: {step}/{MAX_STEPS-1}. ' + \
f'Elapsed time: {now} minutes.'
print(out)
def train(start, writer: SummaryWriter, timestamp: str):
actor_losses_bag = list()
critic_losses_bag = list()
best_reward = float('-inf')
test_rewards = []
step = 0
while step < MAX_STEPS:
obs = env.reset()
ou_noise.reset()
now = (time() - start) / 60
print_stuff(step, now)
reward = 0.0
done = False
i = 0
while not done and i < STEPS_PER_EPISODE:
action = agent.act(obs, framework, True, ou=ou_noise, step=i)
next_obs, reward, done, _ = env.step(action)
framework.replay_memory.push(
obs, action, reward, next_obs, done
)
actor_loss, critic_loss = framework.learn()
writer.add_scalar('Actor Losses', actor_loss, step)
writer.add_scalar('Critic Losses', critic_loss, step)
best_reward = reward if reward > best_reward else best_reward
obs = next_obs
i += 1
step += 1
if step % EVAL_EVERY == 0:
t_rewards = test(framework)
test_rewards.append(t_rewards)
writer.add_scalar('Avg test rewards', np.mean(t_rewards), step)
# if REPLAY_MEMORY_TYPE == 'prioritized':
# framework.replay_memory.correct_beta(i, STEPS_PER_EPISODE)
# last test
t_rewards = test(framework)
test_rewards.append(t_rewards)
# save stuff
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/{ALGO_NAME}/gym/{filename}/{timestamp}'
gen.make_dir(data_path)
torch.save(framework, f'{data_path}/framework.pt')
return test_rewards
def test(framework: Framework):
rewards = []
for _ in range(EVAL_NUM_EPISODES):
obs = env.reset()
done = False
i = 0
ep_rewards = []
while not done and i < STEPS_PER_EPISODE:
action = agent.act(obs, framework, False)
next_obs, reward, done, _ = env.step(action)
obs = next_obs
ep_rewards.append(reward)
# rewards.append(np.mean(ep_rewards))
rewards.append(np.sum(ep_rewards))
return rewards
def test_video(
framework: Framework,
num_episodes: int,
steps_per_episode: int
):
env = gym.make('Pendulum-v0')
agent = Agent(env.action_space.low,
env.action_space.high, EXPLORATION, torch_device)
for _ in range(num_episodes):
obs = env.reset()
done = False
i = 0
while not done and i < steps_per_episode:
env.render()
action = agent.act(obs, framework, False)
next_obs, _, done, _ = env.step(action)
obs = next_obs
def run():
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
dir_path = f'data/ddpg/gym/{filename}'
data_path, timestamp = gen.make_dir_timestamp(dir_path)
writer = SummaryWriter(f'{data_path}/tensorboard')
train_rewards = []
test_rewards = []
start = time()
train_rewards = train(start, writer, timestamp)
writer.close()
test_rewards = test(framework)
# save stuff
now = (time() - start) / 60
data_file_path = f'{data_path}/log.pickle'
data = {
'train_rewards': train_rewards,
'test_rewards': test_rewards,
'elapsed_time': now,
'eval_every': EVAL_EVERY,
}
gen.save_with_pickle(data, data_file_path)
copyfile(__file__, f'{data_path}/{filename}.py')
print(f'done. Elapsed time: {now} minutes.')
if __name__ == '__main__':
run()
| from shutil import copyfile
import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter
from sys_simulator.general.ou_noise import OUNoise
from sys_simulator.ddpg.agent import Agent
import torch
from sys_simulator.ddpg.framework import Framework
import gym
from time import time
import sys_simulator.general as gen
ALGO_NAME = 'ddpg'
# MAX_NUM_EPISODES = 12000
# STEPS_PER_EPISODE = 500
# REPLAY_INITIAL = 10000
MAX_STEPS = 12000
STEPS_PER_EPISODE = 500
REPLAY_INITIAL = int(0E3)
EVAL_NUM_EPISODES = 10
REPLAY_MEMORY_SIZE = int(1E6)
ACTOR_LEARNING_RATE = 1E-4
CRITIC_LEARNING_RATE = 1E-3
HIDDEN_SIZE = 256
N_HIDDEN_LAYERS = 2
BATCH_SIZE = 128
GAMMA = .99
SOFT_TAU = 1E-2
ALPHA = .6
BETA = .4
EXPLORATION = 'ou'
REPLAY_MEMORY_TYPE = 'standard'
PRIO_BETA_ITS = int(.8*(MAX_STEPS - REPLAY_INITIAL))
EVAL_EVERY = int(MAX_STEPS / 20)
OU_DECAY_PERIOD = 100000
OU_MU = 0.0
OU_THETA = .15
OU_MAX_SIGMA = .3
OU_MIN_SIGMA = .3
torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# env = NormalizedActions(gym.make('Pendulum-v0'))
env = gym.make('Pendulum-v0')
state_size = env.observation_space.shape[0]
action_size = env.action_space.shape[0]
a_min = env.action_space.low
a_max = env.action_space.high
framework = Framework(
REPLAY_MEMORY_TYPE,
REPLAY_MEMORY_SIZE,
REPLAY_INITIAL,
state_size,
action_size,
HIDDEN_SIZE,
N_HIDDEN_LAYERS,
ACTOR_LEARNING_RATE,
CRITIC_LEARNING_RATE,
BATCH_SIZE,
GAMMA,
SOFT_TAU,
torch_device,
alpha=ALPHA,
beta=BETA,
beta_its=PRIO_BETA_ITS
)
ou_noise = OUNoise(
env.action_space,
OU_MU, OU_THETA,
OU_MAX_SIGMA,
OU_MIN_SIGMA,
OU_DECAY_PERIOD
)
agent = Agent(a_min, a_max, EXPLORATION, torch_device)
def print_stuff(step: int, now: float):
if REPLAY_MEMORY_TYPE == 'prioritized':
out = 'Training. ' + \
f'Step: {step}/{MAX_STEPS-1}. ' + \
f'Prio_Beta: {framework.replay_memory._beta}. ' + \
f'Elapsed time: {now} minutes.'
else:
out = 'Training. ' + \
f'Step: {step}/{MAX_STEPS-1}. ' + \
f'Elapsed time: {now} minutes.'
print(out)
def train(start, writer: SummaryWriter, timestamp: str):
actor_losses_bag = list()
critic_losses_bag = list()
best_reward = float('-inf')
test_rewards = []
step = 0
while step < MAX_STEPS:
obs = env.reset()
ou_noise.reset()
now = (time() - start) / 60
print_stuff(step, now)
reward = 0.0
done = False
i = 0
while not done and i < STEPS_PER_EPISODE:
action = agent.act(obs, framework, True, ou=ou_noise, step=i)
next_obs, reward, done, _ = env.step(action)
framework.replay_memory.push(
obs, action, reward, next_obs, done
)
actor_loss, critic_loss = framework.learn()
writer.add_scalar('Actor Losses', actor_loss, step)
writer.add_scalar('Critic Losses', critic_loss, step)
best_reward = reward if reward > best_reward else best_reward
obs = next_obs
i += 1
step += 1
if step % EVAL_EVERY == 0:
t_rewards = test(framework)
test_rewards.append(t_rewards)
writer.add_scalar('Avg test rewards', np.mean(t_rewards), step)
# if REPLAY_MEMORY_TYPE == 'prioritized':
# framework.replay_memory.correct_beta(i, STEPS_PER_EPISODE)
# last test
t_rewards = test(framework)
test_rewards.append(t_rewards)
# save stuff
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/{ALGO_NAME}/gym/{filename}/{timestamp}'
gen.make_dir(data_path)
torch.save(framework, f'{data_path}/framework.pt')
return test_rewards
def test(framework: Framework):
rewards = []
for _ in range(EVAL_NUM_EPISODES):
obs = env.reset()
done = False
i = 0
ep_rewards = []
while not done and i < STEPS_PER_EPISODE:
action = agent.act(obs, framework, False)
next_obs, reward, done, _ = env.step(action)
obs = next_obs
ep_rewards.append(reward)
# rewards.append(np.mean(ep_rewards))
rewards.append(np.sum(ep_rewards))
return rewards
def test_video(
framework: Framework,
num_episodes: int,
steps_per_episode: int
):
env = gym.make('Pendulum-v0')
agent = Agent(env.action_space.low,
env.action_space.high, EXPLORATION, torch_device)
for _ in range(num_episodes):
obs = env.reset()
done = False
i = 0
while not done and i < steps_per_episode:
env.render()
action = agent.act(obs, framework, False)
next_obs, _, done, _ = env.step(action)
obs = next_obs
def run():
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
dir_path = f'data/ddpg/gym/{filename}'
data_path, timestamp = gen.make_dir_timestamp(dir_path)
writer = SummaryWriter(f'{data_path}/tensorboard')
train_rewards = []
test_rewards = []
start = time()
train_rewards = train(start, writer, timestamp)
writer.close()
test_rewards = test(framework)
# save stuff
now = (time() - start) / 60
data_file_path = f'{data_path}/log.pickle'
data = {
'train_rewards': train_rewards,
'test_rewards': test_rewards,
'elapsed_time': now,
'eval_every': EVAL_EVERY,
}
gen.save_with_pickle(data, data_file_path)
copyfile(__file__, f'{data_path}/{filename}.py')
print(f'done. Elapsed time: {now} minutes.')
if __name__ == '__main__':
run() | en | 0.541212 | # MAX_NUM_EPISODES = 12000 # STEPS_PER_EPISODE = 500 # REPLAY_INITIAL = 10000 # env = NormalizedActions(gym.make('Pendulum-v0')) # if REPLAY_MEMORY_TYPE == 'prioritized': # framework.replay_memory.correct_beta(i, STEPS_PER_EPISODE) # last test # save stuff # rewards.append(np.mean(ep_rewards)) # save stuff | 1.977996 | 2 |
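The OU_MU, OU_THETA and OU_*_SIGMA constants in the script above parameterize Ornstein-Uhlenbeck exploration noise that is added to the actor's actions during training. The project's own OUNoise class is not included in this record, so the following is only an illustrative approximation of that kind of temporally correlated noise:

import numpy as np

class SimpleOUNoise:
    """State drifts back toward mu at rate theta while being perturbed by Gaussian noise."""
    def __init__(self, size, mu=0.0, theta=0.15, sigma=0.3):
        self.mu, self.theta, self.sigma = mu, theta, sigma
        self.state = np.ones(size) * mu
    def reset(self):
        self.state = np.ones_like(self.state) * self.mu
    def sample(self):
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(*self.state.shape)
        self.state = self.state + dx
        return self.state

# exploratory_action = np.clip(actor_action + noise.sample(), a_min, a_max)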
mycroft/tts/mary_tts.py | NeonDaniel/HolmesV | 9 | 6632124 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from mycroft.tts.tts import TTSValidator
from mycroft.tts.remote_tts import RemoteTTS
class MaryTTS(RemoteTTS):
PARAMS = {
'LOCALE': 'en_US',
'VOICE': 'cmu-slt-hsmm',
'INPUT_TEXT': 'Hello World',
'INPUT_TYPE': 'TEXT',
'AUDIO': 'WAVE_FILE',
'OUTPUT_TYPE': 'AUDIO'
}
def __init__(self, lang, config):
super(MaryTTS, self).__init__(lang, config, config.get('url'),
'/process', MaryTTSValidator(self))
def build_request_params(self, sentence):
params = self.PARAMS.copy()
params['LOCALE'] = self.lang
params['VOICE'] = self.voice
params['INPUT_TEXT'] = sentence.encode('utf-8')
return params
class MaryTTSValidator(TTSValidator):
def __init__(self, tts):
super(MaryTTSValidator, self).__init__(tts)
def validate_lang(self):
# TODO
pass
def validate_connection(self):
try:
resp = requests.get(self.tts.url + "/version", verify=False)
if resp.status_code == 200:
return True
except Exception:
raise Exception(
'MaryTTS server could not be verified. Check your connection '
'to the server: ' + self.tts.url)
def get_tts_class(self):
return MaryTTS
| # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from mycroft.tts.tts import TTSValidator
from mycroft.tts.remote_tts import RemoteTTS
class MaryTTS(RemoteTTS):
PARAMS = {
'LOCALE': 'en_US',
'VOICE': 'cmu-slt-hsmm',
'INPUT_TEXT': 'Hello World',
'INPUT_TYPE': 'TEXT',
'AUDIO': 'WAVE_FILE',
'OUTPUT_TYPE': 'AUDIO'
}
def __init__(self, lang, config):
super(MaryTTS, self).__init__(lang, config, config.get('url'),
'/process', MaryTTSValidator(self))
def build_request_params(self, sentence):
params = self.PARAMS.copy()
params['LOCALE'] = self.lang
params['VOICE'] = self.voice
params['INPUT_TEXT'] = sentence.encode('utf-8')
return params
class MaryTTSValidator(TTSValidator):
def __init__(self, tts):
super(MaryTTSValidator, self).__init__(tts)
def validate_lang(self):
# TODO
pass
def validate_connection(self):
try:
resp = requests.get(self.tts.url + "/version", verify=False)
if resp.status_code == 200:
return True
except Exception:
raise Exception(
'MaryTTS server could not be verified. Check your connection '
'to the server: ' + self.tts.url)
def get_tts_class(self):
return MaryTTS
| en | 0.84995 | # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # TODO | 2.222969 | 2 |
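The PARAMS dictionary above maps directly onto query parameters of the MaryTTS /process endpoint; the HTTP call itself lives in RemoteTTS, which is not part of this record. A rough, assumed illustration of the request that ends up being made (host and port are placeholders; 59125 is the usual MaryTTS default):

import requests

params = {
    'LOCALE': 'en_US',
    'VOICE': 'cmu-slt-hsmm',
    'INPUT_TEXT': 'Hello World',
    'INPUT_TYPE': 'TEXT',
    'AUDIO': 'WAVE_FILE',
    'OUTPUT_TYPE': 'AUDIO',
}
resp = requests.get('http://localhost:59125/process', params=params, timeout=10)
wav_bytes = resp.content  # rendered speech on success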
app.py | pprats/um-tdd | 0 | 6632125 | <gh_stars>0
from main import create_app
from main import db
from main.models import UserModel
import os
def create_admins_in_db():
admins = db.session.query(UserModel.id_num).filter(UserModel.admin == True)
admins_list = [admin for admin, in admins]
if len(admins_list) == 0:
print("Creando admin")
user = UserModel(
email=os.getenv('ADMIN_MAIL'),
plain_password=<PASSWORD>('<PASSWORD>'),
admin=bool(os.getenv('ADMIN_BOOL'))
)
db.session.add(user)
db.session.commit()
else:
pass
# Creating Flask app instance
app = create_app()
# Loading app context
app.app_context().push()
# If this script is run, the db is created if not; and the app is run in an specific port
if __name__ == '__main__':
db.create_all()
create_admins_in_db()
app.run(debug=True, port=os.getenv('PORT'))
| from main import create_app
from main import db
from main.models import UserModel
import os
def create_admins_in_db():
admins = db.session.query(UserModel.id_num).filter(UserModel.admin == True)
admins_list = [admin for admin, in admins]
if len(admins_list) == 0:
print("Creando admin")
user = UserModel(
email=os.getenv('ADMIN_MAIL'),
plain_password=<PASSWORD>('<PASSWORD>'),
admin=bool(os.getenv('ADMIN_BOOL'))
)
db.session.add(user)
db.session.commit()
else:
pass
# Creating Flask app instance
app = create_app()
# Loading app context
app.app_context().push()
# If this script is run, the db is created if not; and the app is run in an specific port
if __name__ == '__main__':
db.create_all()
create_admins_in_db()
app.run(debug=True, port=os.getenv('PORT')) | en | 0.885759 | # Creating Flask app instance # Loading app context # If this script is run, the db is created if not; and the app is run in an specific port | 2.757615 | 3 |
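One detail in the record above deserves a note: bool(os.getenv('ADMIN_BOOL')) is truthy for any non-empty string, including the literal text 'False'. A small helper of the kind commonly used instead (illustrative only, not part of the original app):

import os

def env_flag(name, default=False):
    """Interpret the usual textual spellings of a boolean environment variable."""
    raw = os.getenv(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')

# admin=env_flag('ADMIN_BOOL') avoids bool('False') evaluating to True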
insights/core/filters.py | mglantz/insights-core | 1 | 6632126 | """
The filters module allows developers to apply filters to datasources. A filter
is a simple string, and it matches if it is contained anywhere within a line.
If a datasource has filters defined, it will return only lines matching at
least one of them. If a datasource has no filters, it will return all lines.
Filters aren't applicable to "raw" datasources, which are created with
``kind=RawFileProvider`` and have ``RegistryPoint``s with ``raw=True``.
The addition of a single filter can cause a datasource to change from returning
all lines to returning just those that match. Therefore, any filtered
datasource should have at least one filter in the commit introducing it so
downstream components don't inadvertently change its behavior.
The benefit of this fragility is the ability to drastically reduce in-memory
footprint and archive sizes. An additional benefit is the ability to evaluate
only lines known to be free of sensitive information.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to that
implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will
only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs``
for those classes.
Filtering can be disabled globally by setting the environment variable
``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be
filtered even if filters are defined for them.
"""
import os
import pkgutil
import six
import yaml as ser
from collections import defaultdict
import insights
from insights.core import dr, plugins
from insights.util import parse_bool
_CACHE = {}
FILTERS = defaultdict(set)
ENABLED = parse_bool(os.environ.get("INSIGHTS_FILTERS_ENABLED"), default=True)
def add_filter(ds, patterns):
"""
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
"""
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
delegate = dr.get_delegate(ds)
if delegate.raw:
raise Exception("Filters aren't applicable to raw datasources.")
if not delegate.filterable:
raise Exception("Filters aren't applicable to %s." % dr.get_name(ds))
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.")
def get_filters(component):
"""
Get the set of filters for the given datasource.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to
that implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
``insights.specs`` for those classes.
Args:
component (a datasource): The target datasource
Returns:
set: The set of filters defined for the datasource
"""
def inner(c, filters=None):
filters = filters or set()
if not ENABLED:
return filters
if not plugins.is_datasource(c):
return filters
if c in FILTERS:
filters |= FILTERS[c]
for d in dr.get_dependents(c):
filters |= inner(d, filters)
return filters
if component not in _CACHE:
_CACHE[component] = inner(component)
return _CACHE[component]
def apply_filters(target, lines):
"""
Applys filters to the lines of a datasource. This function is used only in
integration tests. Filters are applied in an equivalent but more performant
way at run time.
"""
filters = get_filters(target)
if filters:
for l in lines:
if any(f in l for f in filters):
yield l
else:
for l in lines:
yield l
_filename = ".".join(["filters", ser.__name__])
_dumps = ser.dump
_loads = ser.safe_load
def loads(string):
"""Loads the filters dictionary given a string."""
d = _loads(string)
for k, v in d.items():
FILTERS[dr.get_component(k) or k] = set(v)
def load(stream=None):
"""
Loads filters from a stream, normally an open file. If one is
not passed, filters are loaded from a default location within
the project.
"""
if stream:
loads(stream.read())
else:
data = pkgutil.get_data(insights.__name__, _filename)
return loads(data) if data else None
def dumps():
"""Returns a string representation of the FILTERS dictionary."""
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d)
def dump(stream=None):
"""
Dumps a string representation of `FILTERS` to a stream, normally an
open file. If none is passed, `FILTERS` is dumped to a default location
within the project.
"""
if stream:
stream.write(dumps())
else:
path = os.path.join(os.path.dirname(insights.__file__), _filename)
with open(path, "wu") as f:
f.write(dumps())
| """
The filters module allows developers to apply filters to datasources. A filter
is a simple string, and it matches if it is contained anywhere within a line.
If a datasource has filters defined, it will return only lines matching at
least one of them. If a datasource has no filters, it will return all lines.
Filters aren't applicable to "raw" datasources, which are created with
``kind=RawFileProvider`` and have ``RegistryPoint``s with ``raw=True``.
The addition of a single filter can cause a datasource to change from returning
all lines to returning just those that match. Therefore, any filtered
datasource should have at least one filter in the commit introducing it so
downstream components don't inadvertently change its behavior.
The benefit of this fragility is the ability to drastically reduce in-memory
footprint and archive sizes. An additional benefit is the ability to evaluate
only lines known to be free of sensitive information.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to that
implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will
only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs``
for those classes.
Filtering can be disabled globally by setting the environment variable
``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be
filtered even if filters are defined for them.
"""
import os
import pkgutil
import six
import yaml as ser
from collections import defaultdict
import insights
from insights.core import dr, plugins
from insights.util import parse_bool
_CACHE = {}
FILTERS = defaultdict(set)
ENABLED = parse_bool(os.environ.get("INSIGHTS_FILTERS_ENABLED"), default=True)
def add_filter(ds, patterns):
"""
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
"""
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
delegate = dr.get_delegate(ds)
if delegate.raw:
raise Exception("Filters aren't applicable to raw datasources.")
if not delegate.filterable:
raise Exception("Filters aren't applicable to %s." % dr.get_name(ds))
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.")
def get_filters(component):
"""
Get the set of filters for the given datasource.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to
that implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
``insights.specs`` for those classes.
Args:
component (a datasource): The target datasource
Returns:
set: The set of filters defined for the datasource
"""
def inner(c, filters=None):
filters = filters or set()
if not ENABLED:
return filters
if not plugins.is_datasource(c):
return filters
if c in FILTERS:
filters |= FILTERS[c]
for d in dr.get_dependents(c):
filters |= inner(d, filters)
return filters
if component not in _CACHE:
_CACHE[component] = inner(component)
return _CACHE[component]
def apply_filters(target, lines):
"""
Applys filters to the lines of a datasource. This function is used only in
integration tests. Filters are applied in an equivalent but more performant
way at run time.
"""
filters = get_filters(target)
if filters:
for l in lines:
if any(f in l for f in filters):
yield l
else:
for l in lines:
yield l
_filename = ".".join(["filters", ser.__name__])
_dumps = ser.dump
_loads = ser.safe_load
def loads(string):
"""Loads the filters dictionary given a string."""
d = _loads(string)
for k, v in d.items():
FILTERS[dr.get_component(k) or k] = set(v)
def load(stream=None):
"""
Loads filters from a stream, normally an open file. If one is
not passed, filters are loaded from a default location within
the project.
"""
if stream:
loads(stream.read())
else:
data = pkgutil.get_data(insights.__name__, _filename)
return loads(data) if data else None
def dumps():
"""Returns a string representation of the FILTERS dictionary."""
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d)
def dump(stream=None):
"""
Dumps a string representation of `FILTERS` to a stream, normally an
open file. If none is passed, `FILTERS` is dumped to a default location
within the project.
"""
if stream:
stream.write(dumps())
else:
path = os.path.join(os.path.dirname(insights.__file__), _filename)
with open(path, "wu") as f:
f.write(dumps())
| en | 0.83591 | The filters module allows developers to apply filters to datasources. A filter is a simple string, and it matches if it is contained anywhere within a line. If a datasource has filters defined, it will return only lines matching at least one of them. If a datasource has no filters, it will return all lines. Filters aren't applicable to "raw" datasources, which are created with ``kind=RawFileProvider`` and have ``RegistryPoint``s with ``raw=True``. The addition of a single filter can cause a datasource to change from returning all lines to returning just those that match. Therefore, any filtered datasource should have at least one filter in the commit introducing it so downstream components don't inadvertently change its behavior. The benefit of this fragility is the ability to drastically reduce in-memory footprint and archive sizes. An additional benefit is the ability to evaluate only lines known to be free of sensitive information. Filters added to a ``RegistryPoint`` will be applied to all datasources that implement it. Filters added to a datasource implementation apply only to that implementation. For example, a filter added to ``Specs.ps_auxww`` will apply to ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``, ``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs`` for those classes. Filtering can be disabled globally by setting the environment variable ``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be filtered even if filters are defined for them. Add a filter or list of filters to a datasource. A filter is a simple string, and it matches if it is contained anywhere within a line. Args: ds (@datasource component): The datasource to filter patterns (str, [str]): A string, list of strings, or set of strings to add to the datasource's filters. Get the set of filters for the given datasource. Filters added to a ``RegistryPoint`` will be applied to all datasources that implement it. Filters added to a datasource implementation apply only to that implementation. For example, a filter added to ``Specs.ps_auxww`` will apply to ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``, ``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww`` will only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs`` for those classes. Args: component (a datasource): The target datasource Returns: set: The set of filters defined for the datasource Applys filters to the lines of a datasource. This function is used only in integration tests. Filters are applied in an equivalent but more performant way at run time. Loads the filters dictionary given a string. Loads filters from a stream, normally an open file. If one is not passed, filters are loaded from a default location within the project. Returns a string representation of the FILTERS dictionary. Dumps a string representation of `FILTERS` to a stream, normally an open file. If none is passed, `FILTERS` is dumped to a default location within the project. | 2.08842 | 2 |
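A short usage sketch of the API that the module docstring above describes, following its own Specs.ps_auxww example (assumes the insights package layout referenced there):

from insights.core import filters
from insights.specs import Specs

# restrict the ps_auxww datasource to lines containing either string
filters.add_filter(Specs.ps_auxww, ["COMMAND", "httpd"])

# union of filters registered on the registry point and on its implementations
effective = filters.get_filters(Specs.ps_auxww)
assert "httpd" in effective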
lib/utils/logger.py | wjbKimberly/DetectAndTrack-wjb | 1,007 | 6632127 | ##############################################################
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from collections import deque
import json
# Print lower precision floating point values than default FLOAT_REPR
json.encoder.FLOAT_REPR = lambda o: format(o, '.6f')
def log_json_stats(stats, json_out_file=None, sort_keys=True):
json_str = json.dumps(stats, sort_keys=sort_keys)
print('json_stats: {:s}'.format(json_str))
if json_out_file is not None:
with open(json_out_file, 'a') as fout:
fout.write('{:s}\n'.format(json_str))
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def AddValue(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
def GetMedianValue(self):
return np.median(self.deque)
def GetAverageValue(self):
return np.mean(self.deque)
def GetGlobalAverageValue(self):
return self.total / self.count
| ##############################################################
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from collections import deque
import json
# Print lower precision floating point values than default FLOAT_REPR
json.encoder.FLOAT_REPR = lambda o: format(o, '.6f')
def log_json_stats(stats, json_out_file=None, sort_keys=True):
json_str = json.dumps(stats, sort_keys=sort_keys)
print('json_stats: {:s}'.format(json_str))
if json_out_file is not None:
with open(json_out_file, 'a') as fout:
fout.write('{:s}\n'.format(json_str))
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def AddValue(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
def GetMedianValue(self):
return np.median(self.deque)
def GetAverageValue(self):
return np.mean(self.deque)
def GetGlobalAverageValue(self):
return self.total / self.count
| en | 0.443647 | ############################################################## # Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. ############################################################## # Print lower precision floating point values than default FLOAT_REPR Track a series of values and provide access to smoothed values over a window or the global series average. | 2.313379 | 2 |
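Typical use of the SmoothedValue helper defined above: a fixed window for recent statistics alongside a running global average.

losses = SmoothedValue(window_size=20)
for loss in [0.9, 0.7, 0.65, 0.5]:
    losses.AddValue(loss)
print(losses.GetMedianValue())         # median over the last 20 values
print(losses.GetAverageValue())        # mean over the last 20 values
print(losses.GetGlobalAverageValue())  # mean over every value ever added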
openstackclient/tests/functional/compute/v2/test_aggregate.py | BeyondTheClouds/python-openstackclient | 0 | 6632128 | <reponame>BeyondTheClouds/python-openstackclient
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import uuid
from openstackclient.tests.functional import base
class AggregateTests(base.TestCase):
"""Functional tests for aggregate"""
def wait_for_status(self, check_type, check_name, desired_status,
wait=120, interval=5, failures=None):
current_status = "notset"
if failures is None:
failures = ['error']
total_sleep = 0
while total_sleep < wait:
output = json.loads(self.openstack(
check_type + ' show -f json ' + check_name))
current_status = output['name']
if (current_status == desired_status):
print('{} {} now has status {}'
.format(check_type, check_name, current_status))
return
print('Checking {} {} Waiting for {} current status: {}'
.format(check_type, check_name,
desired_status, current_status))
if current_status in failures:
raise Exception(
'Current status {} of {} {} is one of failures {}'
.format(current_status, check_type, check_name, failures))
time.sleep(interval)
total_sleep += interval
self.assertOutput(desired_status, current_status)
def test_aggregate_crud(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'aggregate create -f json ' +
'--zone nova ' +
'--property a=b ' +
name1
))
self.assertEqual(
name1,
cmd_output['name']
)
self.assertEqual(
'nova',
cmd_output['availability_zone']
)
self.assertIn(
'a',
cmd_output['properties']
)
self.wait_for_status('aggregate', name1, name1)
self.addCleanup(
self.openstack,
'aggregate delete ' + name1,
fail_ok=True,
)
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'aggregate create -f json ' +
'--zone external ' +
name2
))
self.assertEqual(
name2,
cmd_output['name']
)
self.assertEqual(
'external',
cmd_output['availability_zone']
)
self.wait_for_status('aggregate', name2, name2)
self.addCleanup(
self.openstack,
'aggregate delete ' + name2,
fail_ok=True,
)
# Test aggregate set
name3 = uuid.uuid4().hex
raw_output = self.openstack(
'aggregate set ' +
'--name ' + name3 + ' ' +
'--zone internal ' +
'--no-property ' +
'--property c=d ' +
name1
)
self.assertOutput('', raw_output)
self.addCleanup(
self.openstack,
'aggregate delete ' + name3,
fail_ok=True,
)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' +
name3
))
self.assertEqual(
name3,
cmd_output['name']
)
self.assertEqual(
'internal',
cmd_output['availability_zone']
)
self.assertIn(
'c',
cmd_output['properties']
)
self.assertNotIn(
'a',
cmd_output['properties']
)
# Test aggregate list
cmd_output = json.loads(self.openstack(
'aggregate list -f json'
))
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
# Test aggregate list --long
cmd_output = json.loads(self.openstack(
'aggregate list --long -f json'
))
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
properties = [x['Properties'] for x in cmd_output]
self.assertNotIn({'a': 'b'}, properties)
self.assertIn({'c': 'd'}, properties)
# Test unset
raw_output = self.openstack(
'aggregate unset ' +
'--property c ' +
name3
)
self.assertOutput('', raw_output)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' +
name3
))
self.assertNotIn(
"c='d'",
cmd_output['properties']
)
# test aggregate delete
del_output = self.openstack(
'aggregate delete ' +
name3 + ' ' +
name2
)
self.assertOutput('', del_output)
def test_aggregate_add_and_remove_host(self):
"""Test aggregate add and remove host"""
# Get a host
cmd_output = json.loads(self.openstack(
'host list -f json'
))
host_name = cmd_output[0]['Host Name']
# NOTE(dtroyer): Cells v1 is not operable with aggregates. Hostnames
# are returned as rrr@host or ccc!rrr@host.
if '@' in host_name:
self.skipTest("Skip aggregates in a Nova cells v1 configuration")
name = uuid.uuid4().hex
self.openstack(
'aggregate create ' +
name
)
self.addCleanup(self.openstack, 'aggregate delete ' + name)
# Test add host
cmd_output = json.loads(self.openstack(
'aggregate add host -f json ' +
name + ' ' +
host_name
))
self.assertIn(
host_name,
cmd_output['hosts']
)
# Test remove host
cmd_output = json.loads(self.openstack(
'aggregate remove host -f json ' +
name + ' ' +
host_name
))
self.assertNotIn(
host_name,
cmd_output['hosts']
)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import uuid
from openstackclient.tests.functional import base
class AggregateTests(base.TestCase):
"""Functional tests for aggregate"""
def wait_for_status(self, check_type, check_name, desired_status,
wait=120, interval=5, failures=None):
current_status = "notset"
if failures is None:
failures = ['error']
total_sleep = 0
while total_sleep < wait:
output = json.loads(self.openstack(
check_type + ' show -f json ' + check_name))
current_status = output['name']
if (current_status == desired_status):
print('{} {} now has status {}'
.format(check_type, check_name, current_status))
return
print('Checking {} {} Waiting for {} current status: {}'
.format(check_type, check_name,
desired_status, current_status))
if current_status in failures:
raise Exception(
'Current status {} of {} {} is one of failures {}'
.format(current_status, check_type, check_name, failures))
time.sleep(interval)
total_sleep += interval
self.assertOutput(desired_status, current_status)
def test_aggregate_crud(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'aggregate create -f json ' +
'--zone nova ' +
'--property a=b ' +
name1
))
self.assertEqual(
name1,
cmd_output['name']
)
self.assertEqual(
'nova',
cmd_output['availability_zone']
)
self.assertIn(
'a',
cmd_output['properties']
)
self.wait_for_status('aggregate', name1, name1)
self.addCleanup(
self.openstack,
'aggregate delete ' + name1,
fail_ok=True,
)
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'aggregate create -f json ' +
'--zone external ' +
name2
))
self.assertEqual(
name2,
cmd_output['name']
)
self.assertEqual(
'external',
cmd_output['availability_zone']
)
self.wait_for_status('aggregate', name2, name2)
self.addCleanup(
self.openstack,
'aggregate delete ' + name2,
fail_ok=True,
)
# Test aggregate set
name3 = uuid.uuid4().hex
raw_output = self.openstack(
'aggregate set ' +
'--name ' + name3 + ' ' +
'--zone internal ' +
'--no-property ' +
'--property c=d ' +
name1
)
self.assertOutput('', raw_output)
self.addCleanup(
self.openstack,
'aggregate delete ' + name3,
fail_ok=True,
)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' +
name3
))
self.assertEqual(
name3,
cmd_output['name']
)
self.assertEqual(
'internal',
cmd_output['availability_zone']
)
self.assertIn(
'c',
cmd_output['properties']
)
self.assertNotIn(
'a',
cmd_output['properties']
)
# Test aggregate list
cmd_output = json.loads(self.openstack(
'aggregate list -f json'
))
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
# Test aggregate list --long
cmd_output = json.loads(self.openstack(
'aggregate list --long -f json'
))
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
properties = [x['Properties'] for x in cmd_output]
self.assertNotIn({'a': 'b'}, properties)
self.assertIn({'c': 'd'}, properties)
# Test unset
raw_output = self.openstack(
'aggregate unset ' +
'--property c ' +
name3
)
self.assertOutput('', raw_output)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' +
name3
))
self.assertNotIn(
"c='d'",
cmd_output['properties']
)
# test aggregate delete
del_output = self.openstack(
'aggregate delete ' +
name3 + ' ' +
name2
)
self.assertOutput('', del_output)
def test_aggregate_add_and_remove_host(self):
"""Test aggregate add and remove host"""
# Get a host
cmd_output = json.loads(self.openstack(
'host list -f json'
))
host_name = cmd_output[0]['Host Name']
# NOTE(dtroyer): Cells v1 is not operable with aggregates. Hostnames
# are returned as rrr@host or ccc!rrr@host.
if '@' in host_name:
self.skipTest("Skip aggregates in a Nova cells v1 configuration")
name = uuid.uuid4().hex
self.openstack(
'aggregate create ' +
name
)
self.addCleanup(self.openstack, 'aggregate delete ' + name)
# Test add host
cmd_output = json.loads(self.openstack(
'aggregate add host -f json ' +
name + ' ' +
host_name
))
self.assertIn(
host_name,
cmd_output['hosts']
)
# Test remove host
cmd_output = json.loads(self.openstack(
'aggregate remove host -f json ' +
name + ' ' +
host_name
))
self.assertNotIn(
host_name,
cmd_output['hosts']
) | en | 0.867302 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Functional tests for aggregate Test create, delete multiple # Test aggregate set # Test aggregate list # Test aggregate list --long # Test unset # test aggregate delete Test aggregate add and remove host # Get a host # NOTE(dtroyer): Cells v1 is not operable with aggregates. Hostnames # are returned as rrr@host or ccc!rrr@host. # Test add host # Test remove host | 2.368147 | 2 |
PMS5003.py | andyrew/pi-airq | 0 | 6632129 | <reponame>andyrew/pi-airq<filename>PMS5003.py
import serial
import struct
import sys
import bisect
aqi_breakpoints = [ 0, 50, 100, 150, 200, 300, 400, 500 ]
aqi_pm25_breakpoints = [ 0, 12.1, 35.5, 55.5, 150.5, 250.5, 350.5, 500.5 ]
class PMS5003:
def __init__(self, serial_terminal="/dev/serial0"):
self.serial_terminal = serial_terminal
self.baudrate = 9600
self.read()
def read(self):
try:
self.serial_connection = serial.Serial(self.serial_terminal, baudrate=self.baudrate)
# The following block of code is largely taken from Adafruit Learning System Guides
#
# github.com/adafruit/Adafruit_Learning_System_Guides/PMS5003_Air_Quality_Sensor/PMS5003_CircuitPython/main.py
# Copyright (c) 2018 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
buffer = []
data = self.serial_connection.read(32) # read up to 32 bytes
buffer += list(data)
while buffer and buffer[0] != 0x42:
buffer.pop(0)
if len(buffer) > 200:
buffer = [] # avoid an overrun if all bad data
raise RuntimeError("potential overrun avoided - PMS5003 read")
if len(buffer) < 32:
raise RuntimeError("not enough bytes received - PMS5003 read")
if buffer[1] != 0x4d:
buffer.pop(0)
raise RuntimeError("data received doesn't start correctly - PMS5003 read")
frame_len = struct.unpack(">H", bytes(buffer[2:4]))[0]
if frame_len != 28:
buffer = []
raise RuntimeError("not enough good bytes received - PMS5003 read")
frame = struct.unpack(">HHHHHHHHHHHHHH", bytes(buffer[4:]))
pm10_standard, pm25_standard, pm100_standard, pm10_env, \
pm25_env, pm100_env, particles_03um, particles_05um, particles_10um, \
particles_25um, particles_50um, particles_100um, skip, checksum = frame
check = sum(buffer[0:30])
if check != checksum:
buffer = []
raise RuntimeError("checksum doesn't match - PMS5003 read")
# End Adafruit Code
# Thanks Adafruit!
self.pm10_standard = pm10_standard
self.pm25_standard = pm25_standard
self.pm100_standard = pm100_standard
self.pm10_env = pm10_env
self.pm25_env = pm25_env
self.pm100_env = pm100_env
self.particles_03um = particles_03um
self.particles_05um = particles_05um
self.particles_10um = particles_10um
self.particles_25um = particles_25um
self.particles_50um = particles_50um
self.particles_100um = particles_100um
self.calc_aq_index()
except:
sys.stderr.write('problem with PMS5003 read')
def calc_aq_index(self):
# find breakpoints
idx_pm25 = bisect.bisect_left(aqi_pm25_breakpoints, self.pm25_standard)
self.aqi_pm25 = int (( aqi_breakpoints[idx_pm25] - aqi_breakpoints[idx_pm25-1] ) / (aqi_pm25_breakpoints[idx_pm25] - aqi_pm25_breakpoints[idx_pm25-1]) * \
( self.pm25_standard - aqi_pm25_breakpoints[idx_pm25-1] ) + aqi_breakpoints[idx_pm25-1] )
| import serial
import struct
import sys
import bisect
aqi_breakpoints = [ 0, 50, 100, 150, 200, 300, 400, 500 ]
aqi_pm25_breakpoints = [ 0, 12.1, 35.5, 55.5, 150.5, 250.5, 350.5, 500.5 ]
class PMS5003:
def __init__(self, serial_terminal="/dev/serial0"):
self.serial_terminal = serial_terminal
self.baudrate = 9600
self.read()
def read(self):
try:
self.serial_connection = serial.Serial(self.serial_terminal, baudrate=self.baudrate)
# The following block of code is largely taken from Adafruit Learning System Guides
#
# github.com/adafruit/Adafruit_Learning_System_Guides/PMS5003_Air_Quality_Sensor/PMS5003_CircuitPython/main.py
# Copyright (c) 2018 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
buffer = []
data = self.serial_connection.read(32) # read up to 32 bytes
buffer += list(data)
while buffer and buffer[0] != 0x42:
buffer.pop(0)
if len(buffer) > 200:
buffer = [] # avoid an overrun if all bad data
raise RuntimeError("potential overrun avoided - PMS5003 read")
if len(buffer) < 32:
raise RuntimeError("not enough bytes received - PMS5003 read")
if buffer[1] != 0x4d:
buffer.pop(0)
raise RuntimeError("data received doesn't start correctly - PMS5003 read")
frame_len = struct.unpack(">H", bytes(buffer[2:4]))[0]
if frame_len != 28:
buffer = []
raise RuntimeError("not enough good bytes received - PMS5003 read")
frame = struct.unpack(">HHHHHHHHHHHHHH", bytes(buffer[4:]))
pm10_standard, pm25_standard, pm100_standard, pm10_env, \
pm25_env, pm100_env, particles_03um, particles_05um, particles_10um, \
particles_25um, particles_50um, particles_100um, skip, checksum = frame
check = sum(buffer[0:30])
if check != checksum:
buffer = []
raise RuntimeError("checksum doesn't match - PMS5003 read")
# End Adafruit Code
# Thanks Adafruit!
self.pm10_standard = pm10_standard
self.pm25_standard = pm25_standard
self.pm100_standard = pm100_standard
self.pm10_env = pm10_env
self.pm25_env = pm25_env
self.pm100_env = pm100_env
self.particles_03um = particles_03um
self.particles_05um = particles_05um
self.particles_10um = particles_10um
self.particles_25um = particles_25um
self.particles_50um = particles_50um
self.particles_100um = particles_100um
self.calc_aq_index()
except:
sys.stderr.write('problem with PMS5003 read')
def calc_aq_index(self):
# find breakpoints
idx_pm25 = bisect.bisect_left(aqi_pm25_breakpoints, self.pm25_standard)
self.aqi_pm25 = int (( aqi_breakpoints[idx_pm25] - aqi_breakpoints[idx_pm25-1] ) / (aqi_pm25_breakpoints[idx_pm25] - aqi_pm25_breakpoints[idx_pm25-1]) * \
( self.pm25_standard - aqi_pm25_breakpoints[idx_pm25-1] ) + aqi_breakpoints[idx_pm25-1] ) | en | 0.831536 | # The following block of code is largely taken from Adafruit Learning System Guides # # github.com/adafruit/Adafruit_Learning_System_Guides/PMS5003_Air_Quality_Sensor/PMS5003_CircuitPython/main.py # Copyright (c) 2018 Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # read up to 32 bytes # avoid an overrun if all bad data # End Adafruit Code # Thanks Adafruit! # find breakpoints | 2.619102 | 3 |
src/tests/test_pagure_flask_ui_login.py | yifengyou/learn-pagure | 0 | 6632130 | # -*- coding: utf-8 -*-
"""
(c) 2016 - Copyright Red Hat Inc
Authors:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import hashlib
import json
import unittest
import shutil
import sys
import tempfile
import os
import flask
import pygit2
import six
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.query
import tests
from pagure.lib.repo import PagureRepo
import pagure.ui.login
class PagureFlaskLogintests(tests.SimplePagureTest):
""" Tests for flask app controller of pagure """
def setUp(self):
""" Create the application with PAGURE_AUTH being local. """
super(PagureFlaskLogintests, self).setUp()
app = pagure.flask_app.create_app(
{"DB_URL": self.dbpath, "PAGURE_AUTH": "local"}
)
# Remove the log handlers for the tests
app.logger.handlers = []
self.app = app.test_client()
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_front_page(self):
""" Test the front page. """
# First access the front page
output = self.app.get("/")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_new_user(self):
""" Test the new_user endpoint. """
# Check before:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
# First access the new user page
output = self.app.get("/user/new")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/user/new" method="post">',
output.get_data(as_text=True),
)
# Create the form to send there
# This has all the data needed
data = {
"user": "foo",
"fullname": "user foo",
"email_address": "<EMAIL>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/user/new", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/user/new" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# Submit the form with the csrf token
data["csrf_token"] = csrf_token
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/user/new" method="post">',
output.get_data(as_text=True),
)
self.assertIn("Username already taken.", output.get_data(as_text=True))
# Submit the form with another username
data["user"] = "foouser"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Email address already taken.", output.get_data(as_text=True)
)
# Submit the form with proper data
data["email_address"] = "<EMAIL>"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"User created, please check your email to activate the account",
output.get_data(as_text=True),
)
# Check after:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
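
    # Illustrative sketch (not part of the upstream suite): the inline
    # csrf_token scraping repeated in several tests below could be factored
    # into a small helper along these lines (tests that already have a live
    # helper use self.get_csrf() instead).
    @staticmethod
    def _get_csrf_from_html(html):
        """Return the value of the hidden csrf_token field of a rendered form."""
        return html.split('name="csrf_token" type="hidden" value="')[1].split(
            '">'
        )[0]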
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"ALLOW_USER_REGISTRATION": False})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_new_user_disabled(self):
""" Test the disabling of the new_user endpoint. """
# Check before:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
# Attempt to access the new user page
output = self.app.get("/user/new", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"User registration is disabled.", output.get_data(as_text=True)
)
# Check after:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"CHECK_SESSION_IP": False})
def test_do_login(self):
""" Test the do_login endpoint. """
output = self.app.get("/login/")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
# This has all the data needed
data = {"username": "foouser", "password": "<PASSWORD>"}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Insufficient information provided", output.get_data(as_text=True)
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# Submit the form with the csrf token - but invalid user
data["csrf_token"] = csrf_token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Username or password invalid.", output.get_data(as_text=True)
)
# Create a local user
self.test_new_user()
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
# Submit the form with the csrf token - but user not confirmed
data["csrf_token"] = csrf_token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output.get_data(as_text=True),
)
# User in the DB, csrf provided - but wrong password submitted
data["password"] = "password"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Username or password invalid.", output.get_data(as_text=True)
)
# When account is not confirmed i.e user_obj != None
data["password"] = "<PASSWORD>"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output.get_data(as_text=True),
)
# Confirm the user so that we can log in
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
# Remove the token
item.token = None
self.session.add(item)
self.session.commit()
# Check the user
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertEqual(item.token, None)
# Login but cannot save the session to the DB due to the missing IP
# address in the flask request
data["password"] = "<PASSWORD>"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Home - Pagure</title>", output_text)
# I'm not sure if the change was in flask or werkzeug, but in older
# version flask.request.remote_addr was returning None, while it
# now returns 127.0.0.1 making our logic pass where it used to
# partly fail
if hasattr(flask, "__version__"):
flask_v = tuple(int(el) for el in flask.__version__.split("."))
if flask_v < (0, 12, 0):
self.assertIn(
'<a class="btn btn-primary" '
'href="/login/?next=http://localhost/">',
output_text,
)
self.assertIn(
"Could not set the session in the db, please report "
"this error to an admin",
output_text,
)
else:
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/dashboard/projects">',
output_text,
)
# Make the password invalid
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith("$2$"))
# Remove the $2$
item.password = item.password[3:]
self.session.add(item)
self.session.commit()
# Check the password
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertFalse(item.password.startswith("$2$"))
# Try login again
output = self.app.post(
"/dologin",
data=data,
follow_redirects=True,
environ_base={"REMOTE_ADDR": "127.0.0.1"},
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Username or password invalid.",
output.get_data(as_text=True),
)
# Check the password is still not of a known version
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertFalse(item.password.startswith("$1$"))
self.assertFalse(item.password.startswith("$2$"))
# V1 password
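        # (Added note: the legacy "$1$" format reconstructed below is a plain
        # sha512(password + salt) hex digest; the assertions further down check
        # that a successful login transparently re-hashes it into the current
        # "$2$" format.)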
password = "%s%s" % ("bar<PASSWORD>", None)
if isinstance(password, six.text_type):
password = password.encode("utf-8")
password = hashlib.sha512(password).hexdigest().encode("utf-8")
item.token = None
item.password = b"$1$" + password
self.session.add(item)
self.session.commit()
# Check the password
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith(b"$1$"))
# Log in with a v1 password
output = self.app.post(
"/dologin",
data=data,
follow_redirects=True,
environ_base={"REMOTE_ADDR": "127.0.0.1"},
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Home - Pagure</title>", output_text)
self.assertIn("Welcome foouser", output_text)
self.assertIn("Activity", output_text)
# Check the password got upgraded to version 2
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith("$2$"))
# We have set the REMOTE_ADDR in the request, so this works with all
# versions of Flask.
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/dashboard/projects">',
output_text,
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"CHECK_SESSION_IP": False})
def test_do_login_and_redirect(self):
""" Test the do_login endpoint with a non-default redirect. """
# This has all the data needed
data = {
"username": "foouser",
"password": "<PASSWORD>",
"csrf_token": self.get_csrf(url="/login/"),
"next_url": "http://localhost/test/",
}
# Create a local user
self.test_new_user()
self.session.commit()
# Confirm the user so that we can log in
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
# Remove the token
item.token = None
self.session.add(item)
self.session.commit()
# Check the user
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertEqual(item.token, None)
# Add a test project to the user
tests.create_projects(self.session, user_id=3)
tests.create_projects_git(os.path.join(self.path, "repos"))
output = self.app.get("/test")
output_text = output.get_data(as_text=True)
self.assertEqual(output.status_code, 200)
self.assertIn("<title>Overview - test - Pagure</title>", output_text)
# Login and redirect to the test project
output = self.app.post(
"/dologin",
data=data,
follow_redirects=True,
environ_base={"REMOTE_ADDR": "127.0.0.1"},
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Overview - test - Pagure</title>", output_text)
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/test/">',
output_text,
)
self.assertIn(
'<span class="d-none d-md-inline">Settings</span>', output_text
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"CHECK_SESSION_IP": False})
def test_has_settings(self):
"""Test that user can see the Settings button when they are logged
in."""
# Create a local user
self.test_new_user()
self.session.commit()
# Remove the token
item = pagure.lib.query.search_user(self.session, username="foouser")
item.token = None
self.session.add(item)
self.session.commit()
# Check the user
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertEqual(item.token, None)
# Add a test project to the user
tests.create_projects(self.session)
tests.create_projects_git(os.path.join(self.path, "repos"))
output = self.app.get("/test")
output_text = output.get_data(as_text=True)
self.assertEqual(output.status_code, 200)
self.assertIn("<title>Overview - test - Pagure</title>", output_text)
# Login and redirect to the test project
user = tests.FakeUser(username="pingou")
with tests.user_set(self.app.application, user):
output = self.app.get("/test")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Overview - test - Pagure</title>", output_text
)
self.assertIn(
'<span class="d-none d-md-inline">Settings</span>', output_text
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_non_ascii_password(self):
"""Test login and create user functionality when the password is
non-ascii.
"""
# Check before:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
# First access the new user page
output = self.app.get("/user/new")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn('<form action="/user/new" method="post">', output_text)
# Create the form to send there
# This has all the data needed
data = {
"user": "foo",
"fullname": "user foo",
"email_address": "<EMAIL>",
"password": "ö",
"confirm_password": "ö",
}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/user/new", data=data)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn('<form action="/user/new" method="post">', output_text)
csrf_token = output_text.split(
'name="csrf_token" type="hidden" value="'
)[1].split('">')[0]
# Submit the form with the csrf token
data["csrf_token"] = csrf_token
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn('<form action="/user/new" method="post">', output_text)
self.assertIn("Username already taken.", output_text)
# Submit the form with another username
data["user"] = "foobar"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn("Email address already taken.", output_text)
# Submit the form with proper data
data["email_address"] = "<EMAIL>"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn(
"User created, please check your email to activate the account",
output_text,
)
# Check after:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
# Checking for the /login page
output = self.app.get("/login/")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
# This has all the data needed
data = {"username": "foob_bar", "password": "ö"}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn("Insufficient information provided", output_text)
# Submit the form with the csrf token - but invalid user
data["csrf_token"] = csrf_token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn("Username or password invalid.", output_text)
# Submit the form with the csrf token - but user not confirmed
data["username"] = "foobar"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output_text,
)
# User in the DB, csrf provided - but wrong password submitted
data["password"] = "öö"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn("Username or password invalid.", output_text)
# When account is not confirmed i.e user_obj != None
data["password"] = "ö"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output_text,
)
# Confirm the user so that we can log in
item = pagure.lib.query.search_user(self.session, username="foobar")
self.assertEqual(item.user, "foobar")
self.assertNotEqual(item.token, None)
# Remove the token
item.token = None
self.session.add(item)
self.session.commit()
# Login but cannot save the session to the DB due to the missing IP
# address in the flask request
data["password"] = "ö"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Home - Pagure</title>", output_text)
# I'm not sure if the change was in flask or werkzeug, but in older
# version flask.request.remote_addr was returning None, while it
# now returns 127.0.0.1 making our logic pass where it used to
# partly fail
if hasattr(flask, "__version__"):
flask_v = tuple(int(el) for el in flask.__version__.split("."))
if flask_v <= (0, 12, 0):
self.assertIn(
'<a class="btn btn-primary" '
'href="/login/?next=http://localhost/">',
output_text,
)
self.assertIn(
"Could not set the session in the db, please report "
"this error to an admin",
output_text,
)
else:
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/dashboard/projects">',
output_text,
)
# Check the user
item = pagure.lib.query.search_user(self.session, username="foobar")
self.assertEqual(item.user, "foobar")
self.assertEqual(item.token, None)
def test_confirm_user(self):
""" Test the confirm_user endpoint. """
output = self.app.get("/confirm/foo", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"No user associated with this token.",
output.get_data(as_text=True),
)
# Create a local user
self.test_new_user()
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith("$2$"))
self.assertNotEqual(item.token, None)
output = self.app.get(
"/confirm/%s" % item.token, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Email confirmed, account activated", output.get_data(as_text=True)
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_lost_password(self):
""" Test the lost_password endpoint. """
output = self.app.get("/password/lost")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Lost password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/lost" method="post">',
output.get_data(as_text=True),
)
# Prepare the data to send
data = {"username": "foouser"}
# Missing CSRF
output = self.app.post("/password/lost", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Lost password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/lost" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With the CSRF - But invalid user
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/lost", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn("Username invalid.", output.get_data(as_text=True))
# With the CSRF and a valid user
data["username"] = "foo"
output = self.app.post(
"/password/lost", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Check your email to finish changing your password",
output.get_data(as_text=True),
)
# With the CSRF and a valid user - but too quick after the last one
data["username"] = "foo"
output = self.app.post(
"/password/lost", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"An email was sent to you less than 3 minutes ago, did you "
"check your spam folder? Otherwise, try again after some time.",
output.get_data(as_text=True),
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_reset_password(self):
""" Test the reset_password endpoint. """
output = self.app.get("/password/reset/foo", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"No user associated with this token.",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.test_lost_password()
self.test_new_user()
# Check the password
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
self.assertTrue(item.password.startswith("$2$"))
old_password = <PASSWORD>
token = item.token
output = self.app.get(
"/password/reset/%s" % token, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/reset/', output.get_data(as_text=True)
)
data = {"password": "<PASSWORD>", "confirm_password": "<PASSWORD>"}
# Missing CSRF
output = self.app.post(
"/password/reset/%s" % token, data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/reset/', output.get_data(as_text=True)
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With CSRF
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/reset/%s" % token, data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn("Password changed", output.get_data(as_text=True))
@patch(
"pagure.ui.login._check_session_cookie", MagicMock(return_value=True)
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_change_password(self):
""" Test the change_password endpoint. """
# Not logged in, redirects
output = self.app.get("/password/change", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.get("/password/change")
self.assertEqual(output.status_code, 404)
self.assertIn("User not found", output.get_data(as_text=True))
user = tests.FakeUser(username="foo")
with tests.user_set(self.app.application, user):
output = self.app.get("/password/change")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
data = {
"old_password": "<PASSWORD>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
# No CSRF token
output = self.app.post("/password/change", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With CSRF - Invalid password format
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/change", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Could not update your password, either user or password "
"could not be checked",
output.get_data(as_text=True),
)
self.test_new_user()
# Remove token of foouser
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
self.assertTrue(item.password.startswith("$2$"))
item.token = None
self.session.add(item)
self.session.commit()
user = tests.FakeUser(username="foouser")
with tests.user_set(self.app.application, user):
output = self.app.get("/password/change")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
data = {
"old_password": "<PASSWORD>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
# No CSRF token
output = self.app.post("/password/change", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With CSRF - Incorrect password
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/change", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Could not update your password, either user or password "
"could not be checked",
output.get_data(as_text=True),
)
# With CSRF - Correct password
data["old_password"] = "<PASSWORD>"
output = self.app.post(
"/password/change", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn("Password changed", output.get_data(as_text=True))
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_logout(self):
""" Test the auth_logout endpoint for local login. """
output = self.app.get("/logout/", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertNotIn(
"You have been logged out", output.get_data(as_text=True)
)
self.assertIn(
'<a class="btn btn-primary" '
'href="/login/?next=http://localhost/">',
output.get_data(as_text=True),
)
user = tests.FakeUser(username="foo")
with tests.user_set(self.app.application, user):
output = self.app.get("/logout/", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"You have been logged out", output.get_data(as_text=True)
)
# Due to the way the tests are running we do not actually
# log out
self.assertIn(
'<a class="dropdown-item" href="/logout/?next='
'http://localhost/dashboard/projects">Log Out</a>',
output.get_data(as_text=True),
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_settings_admin_session_timedout(self):
""" Test the admin_session_timedout with settings endpoint. """
lifetime = pagure.config.config.get(
"ADMIN_SESSION_LIFETIME", datetime.timedelta(minutes=15)
)
td1 = datetime.timedelta(minutes=1)
# session already expired
user = tests.FakeUser(username="foo")
user.login_time = datetime.datetime.utcnow() - lifetime - td1
with tests.user_set(self.app.application, user):
# not following the redirect because user_set contextmanager
# will run again for the login page and set back the user
# which results in a loop, since admin_session_timedout will
# redirect again for the login page
output = self.app.get("/settings/")
self.assertEqual(output.status_code, 302)
self.assertIn("http://localhost/login/", output.location)
# session did not expire
user.login_time = datetime.datetime.utcnow() - lifetime + td1
with tests.user_set(self.app.application, user):
output = self.app.get("/settings/")
self.assertEqual(output.status_code, 200)
@patch("flask.flash")
def test_admin_session_timedout(self, flash):
""" Test the call to admin_session_timedout. """
lifetime = pagure.config.config.get(
"ADMIN_SESSION_LIFETIME", datetime.timedelta(minutes=15)
)
td1 = datetime.timedelta(minutes=1)
# session already expired
user = tests.FakeUser(username="foo")
user.login_time = datetime.datetime.utcnow() - lifetime - td1
with self.app.application.app_context() as ctx:
ctx.g.session = self.session
ctx.g.fas_user = user
self.assertTrue(pagure.flask_app.admin_session_timedout())
# session did not expire
user.login_time = datetime.datetime.utcnow() - lifetime + td1
with self.app.application.app_context() as ctx:
ctx.g.session = self.session
ctx.g.fas_user = user
self.assertFalse(pagure.flask_app.admin_session_timedout())
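
    # Rough sketch of the behaviour exercised above (illustrative only; the
    # actual implementation lives in pagure.flask_app.admin_session_timedout):
    #
    #     def admin_session_timedout():
    #         lifetime = config.get(
    #             "ADMIN_SESSION_LIFETIME", datetime.timedelta(minutes=15))
    #         return datetime.datetime.utcnow() > g.fas_user.login_time + lifetime
    #
    # i.e. the admin session is considered expired once login_time is older
    # than the configured lifetime.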
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_force_logout(self):
""" Test forcing logout. """
user = tests.FakeUser(username="foo")
with tests.user_set(self.app.application, user, keep_get_user=True):
# Test that accessing settings works
output = self.app.get("/settings")
self.assertEqual(output.status_code, 200)
# Now logout everywhere
data = {"csrf_token": self.get_csrf()}
output = self.app.post("/settings/forcelogout/", data=data)
self.assertEqual(output.status_code, 302)
self.assertEqual(
output.headers["Location"], "http://localhost/settings"
)
# We should now get redirected to index, because our session became
# invalid
output = self.app.get("/settings")
self.assertEqual(output.headers["Location"], "http://localhost/")
# After changing the login_time to now, the session should again be
# valid
user.login_time = datetime.datetime.utcnow()
output = self.app.get("/")
self.assertEqual(output.status_code, 302)
if __name__ == "__main__":
unittest.main(verbosity=2)
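
# Note (added for clarity; not in the upstream file): the suite can be run
# directly with `python test_pagure_flask_ui_login.py` thanks to the
# unittest.main() entry point above, or through a runner such as pytest.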
| # -*- coding: utf-8 -*-
"""
(c) 2016 - Copyright Red Hat Inc
Authors:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import hashlib
import json
import unittest
import shutil
import sys
import tempfile
import os
import flask
import pygit2
import six
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.query
import tests
from pagure.lib.repo import PagureRepo
import pagure.ui.login
class PagureFlaskLogintests(tests.SimplePagureTest):
""" Tests for flask app controller of pagure """
def setUp(self):
""" Create the application with PAGURE_AUTH being local. """
super(PagureFlaskLogintests, self).setUp()
app = pagure.flask_app.create_app(
{"DB_URL": self.dbpath, "PAGURE_AUTH": "local"}
)
# Remove the log handlers for the tests
app.logger.handlers = []
self.app = app.test_client()
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_front_page(self):
""" Test the front page. """
# First access the front page
output = self.app.get("/")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_new_user(self):
""" Test the new_user endpoint. """
# Check before:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
# First access the new user page
output = self.app.get("/user/new")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/user/new" method="post">',
output.get_data(as_text=True),
)
# Create the form to send there
# This has all the data needed
data = {
"user": "foo",
"fullname": "user foo",
"email_address": "<EMAIL>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/user/new", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/user/new" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# Submit the form with the csrf token
data["csrf_token"] = csrf_token
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/user/new" method="post">',
output.get_data(as_text=True),
)
self.assertIn("Username already taken.", output.get_data(as_text=True))
# Submit the form with another username
data["user"] = "foouser"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>New user - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Email address already taken.", output.get_data(as_text=True)
)
# Submit the form with proper data
data["email_address"] = "<EMAIL>"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"User created, please check your email to activate the account",
output.get_data(as_text=True),
)
# Check after:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"ALLOW_USER_REGISTRATION": False})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_new_user_disabled(self):
""" Test the disabling of the new_user endpoint. """
# Check before:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
# Attempt to access the new user page
output = self.app.get("/user/new", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"User registration is disabled.", output.get_data(as_text=True)
)
# Check after:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"CHECK_SESSION_IP": False})
def test_do_login(self):
""" Test the do_login endpoint. """
output = self.app.get("/login/")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
# This has all the data needed
data = {"username": "foouser", "password": "<PASSWORD>"}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Insufficient information provided", output.get_data(as_text=True)
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# Submit the form with the csrf token - but invalid user
data["csrf_token"] = csrf_token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Username or password invalid.", output.get_data(as_text=True)
)
# Create a local user
self.test_new_user()
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
# Submit the form with the csrf token - but user not confirmed
data["csrf_token"] = csrf_token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output.get_data(as_text=True),
)
# User in the DB, csrf provided - but wrong password submitted
data["password"] = "password"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Username or password invalid.", output.get_data(as_text=True)
)
# When account is not confirmed i.e user_obj != None
data["password"] = "<PASSWORD>"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output.get_data(as_text=True),
)
# Confirm the user so that we can log in
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
# Remove the token
item.token = None
self.session.add(item)
self.session.commit()
# Check the user
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertEqual(item.token, None)
# Login but cannot save the session to the DB due to the missing IP
# address in the flask request
data["password"] = "<PASSWORD>"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Home - Pagure</title>", output_text)
# I'm not sure if the change was in flask or werkzeug, but in older
# version flask.request.remote_addr was returning None, while it
# now returns 127.0.0.1 making our logic pass where it used to
# partly fail
if hasattr(flask, "__version__"):
flask_v = tuple(int(el) for el in flask.__version__.split("."))
if flask_v < (0, 12, 0):
self.assertIn(
'<a class="btn btn-primary" '
'href="/login/?next=http://localhost/">',
output_text,
)
self.assertIn(
"Could not set the session in the db, please report "
"this error to an admin",
output_text,
)
else:
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/dashboard/projects">',
output_text,
)
# Make the password invalid
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith("$2$"))
# Remove the $2$
item.password = item.password[3:]
self.session.add(item)
self.session.commit()
# Check the password
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertFalse(item.password.startswith("$2$"))
# Try login again
output = self.app.post(
"/dologin",
data=data,
follow_redirects=True,
environ_base={"REMOTE_ADDR": "127.0.0.1"},
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.assertIn(
"Username or password invalid.",
output.get_data(as_text=True),
)
# Check the password is still not of a known version
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertFalse(item.password.startswith("$1$"))
self.assertFalse(item.password.startswith("$2$"))
# V1 password
password = "%s%s" % ("bar<PASSWORD>", None)
if isinstance(password, six.text_type):
password = password.encode("utf-8")
password = hashlib.sha512(password).hexdigest().encode("utf-8")
item.token = None
item.password = b"$1$" + password
self.session.add(item)
self.session.commit()
# Check the password
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith(b"$1$"))
# Log in with a v1 password
output = self.app.post(
"/dologin",
data=data,
follow_redirects=True,
environ_base={"REMOTE_ADDR": "127.0.0.1"},
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Home - Pagure</title>", output_text)
self.assertIn("Welcome foouser", output_text)
self.assertIn("Activity", output_text)
# Check the password got upgraded to version 2
self.session.commit()
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith("$2$"))
# We have set the REMOTE_ADDR in the request, so this works with all
# versions of Flask.
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/dashboard/projects">',
output_text,
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"CHECK_SESSION_IP": False})
def test_do_login_and_redirect(self):
""" Test the do_login endpoint with a non-default redirect. """
# This has all the data needed
data = {
"username": "foouser",
"password": "<PASSWORD>",
"csrf_token": self.get_csrf(url="/login/"),
"next_url": "http://localhost/test/",
}
# Create a local user
self.test_new_user()
self.session.commit()
# Confirm the user so that we can log in
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
# Remove the token
item.token = None
self.session.add(item)
self.session.commit()
# Check the user
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertEqual(item.token, None)
# Add a test project to the user
tests.create_projects(self.session, user_id=3)
tests.create_projects_git(os.path.join(self.path, "repos"))
output = self.app.get("/test")
output_text = output.get_data(as_text=True)
self.assertEqual(output.status_code, 200)
self.assertIn("<title>Overview - test - Pagure</title>", output_text)
# Login and redirect to the test project
output = self.app.post(
"/dologin",
data=data,
follow_redirects=True,
environ_base={"REMOTE_ADDR": "127.0.0.1"},
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Overview - test - Pagure</title>", output_text)
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/test/">',
output_text,
)
self.assertIn(
'<span class="d-none d-md-inline">Settings</span>', output_text
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch.dict("pagure.config.config", {"CHECK_SESSION_IP": False})
def test_has_settings(self):
"""Test that user can see the Settings button when they are logged
in."""
# Create a local user
self.test_new_user()
self.session.commit()
# Remove the token
item = pagure.lib.query.search_user(self.session, username="foouser")
item.token = None
self.session.add(item)
self.session.commit()
# Check the user
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertEqual(item.token, None)
# Add a test project to the user
tests.create_projects(self.session)
tests.create_projects_git(os.path.join(self.path, "repos"))
output = self.app.get("/test")
output_text = output.get_data(as_text=True)
self.assertEqual(output.status_code, 200)
self.assertIn("<title>Overview - test - Pagure</title>", output_text)
# Login and redirect to the test project
user = tests.FakeUser(username="pingou")
with tests.user_set(self.app.application, user):
output = self.app.get("/test")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>Overview - test - Pagure</title>", output_text
)
self.assertIn(
'<span class="d-none d-md-inline">Settings</span>', output_text
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_non_ascii_password(self):
"""Test login and create user functionality when the password is
non-ascii.
"""
# Check before:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(2, len(items))
# First access the new user page
output = self.app.get("/user/new")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn('<form action="/user/new" method="post">', output_text)
# Create the form to send there
# This has all the data needed
data = {
"user": "foo",
"fullname": "user foo",
"email_address": "<EMAIL>",
"password": "ö",
"confirm_password": "ö",
}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/user/new", data=data)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn('<form action="/user/new" method="post">', output_text)
csrf_token = output_text.split(
'name="csrf_token" type="hidden" value="'
)[1].split('">')[0]
# Submit the form with the csrf token
data["csrf_token"] = csrf_token
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn('<form action="/user/new" method="post">', output_text)
self.assertIn("Username already taken.", output_text)
# Submit the form with another username
data["user"] = "foobar"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>New user - Pagure</title>", output_text)
self.assertIn("Email address already taken.", output_text)
# Submit the form with proper data
data["email_address"] = "<EMAIL>"
output = self.app.post("/user/new", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn(
"User created, please check your email to activate the account",
output_text,
)
# Check after:
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
# Checking for the /login page
output = self.app.get("/login/")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
# This has all the data needed
data = {"username": "foob_bar", "password": "ö"}
# Submit this form - Doesn't work since there is no csrf token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn("Insufficient information provided", output_text)
# Submit the form with the csrf token - but invalid user
data["csrf_token"] = csrf_token
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn("Username or password invalid.", output_text)
# Submit the form with the csrf token - but user not confirmed
data["username"] = "foobar"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output_text,
)
# User in the DB, csrf provided - but wrong password submitted
data["password"] = "öö"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn("Username or password invalid.", output_text)
# When account is not confirmed i.e user_obj != None
data["password"] = "ö"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Login - Pagure</title>", output_text)
self.assertIn('<form action="/dologin" method="post">', output_text)
self.assertIn(
"Invalid user, did you confirm the creation with the url "
"provided by email?",
output_text,
)
# Confirm the user so that we can log in
item = pagure.lib.query.search_user(self.session, username="foobar")
self.assertEqual(item.user, "foobar")
self.assertNotEqual(item.token, None)
# Remove the token
item.token = None
self.session.add(item)
self.session.commit()
# Login but cannot save the session to the DB due to the missing IP
# address in the flask request
data["password"] = "ö"
output = self.app.post("/dologin", data=data, follow_redirects=True)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<title>Home - Pagure</title>", output_text)
# I'm not sure if the change was in flask or werkzeug, but in older
# version flask.request.remote_addr was returning None, while it
# now returns 127.0.0.1 making our logic pass where it used to
# partly fail
if hasattr(flask, "__version__"):
flask_v = tuple(int(el) for el in flask.__version__.split("."))
if flask_v <= (0, 12, 0):
self.assertIn(
'<a class="btn btn-primary" '
'href="/login/?next=http://localhost/">',
output_text,
)
self.assertIn(
"Could not set the session in the db, please report "
"this error to an admin",
output_text,
)
else:
self.assertIn(
'<a class="dropdown-item" '
'href="/logout/?next=http://localhost/dashboard/projects">',
output_text,
)
# Check the user
item = pagure.lib.query.search_user(self.session, username="foobar")
self.assertEqual(item.user, "foobar")
self.assertEqual(item.token, None)
def test_confirm_user(self):
""" Test the confirm_user endpoint. """
output = self.app.get("/confirm/foo", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"No user associated with this token.",
output.get_data(as_text=True),
)
# Create a local user
self.test_new_user()
items = pagure.lib.query.search_user(self.session)
self.assertEqual(3, len(items))
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertTrue(item.password.startswith("$2$"))
self.assertNotEqual(item.token, None)
output = self.app.get(
"/confirm/%s" % item.token, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Email confirmed, account activated", output.get_data(as_text=True)
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_lost_password(self):
""" Test the lost_password endpoint. """
output = self.app.get("/password/lost")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Lost password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/lost" method="post">',
output.get_data(as_text=True),
)
# Prepare the data to send
data = {"username": "foouser"}
# Missing CSRF
output = self.app.post("/password/lost", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Lost password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/lost" method="post">',
output.get_data(as_text=True),
)
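        # Scrape the CSRF token out of the rendered form so the POSTs below pass validation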
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With the CSRF - But invalid user
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/lost", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn("Username invalid.", output.get_data(as_text=True))
# With the CSRF and a valid user
data["username"] = "foo"
output = self.app.post(
"/password/lost", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Check your email to finish changing your password",
output.get_data(as_text=True),
)
# With the CSRF and a valid user - but too quick after the last one
data["username"] = "foo"
output = self.app.post(
"/password/lost", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"An email was sent to you less than 3 minutes ago, did you "
"check your spam folder? Otherwise, try again after some time.",
output.get_data(as_text=True),
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_reset_password(self):
""" Test the reset_password endpoint. """
output = self.app.get("/password/reset/foo", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"No user associated with this token.",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
self.test_lost_password()
self.test_new_user()
# Check the password
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
self.assertTrue(item.password.startswith("$2$"))
old_password = <PASSWORD>
token = item.token
output = self.app.get(
"/password/reset/%s" % token, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/reset/', output.get_data(as_text=True)
)
data = {"password": "<PASSWORD>", "confirm_password": "<PASSWORD>"}
# Missing CSRF
output = self.app.post(
"/password/reset/%s" % token, data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/reset/', output.get_data(as_text=True)
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With CSRF
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/reset/%s" % token, data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn("Password changed", output.get_data(as_text=True))
@patch(
"pagure.ui.login._check_session_cookie", MagicMock(return_value=True)
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_change_password(self):
""" Test the change_password endpoint. """
# Not logged in, redirects
output = self.app.get("/password/change", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Login - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
'<form action="/dologin" method="post">',
output.get_data(as_text=True),
)
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.get("/password/change")
self.assertEqual(output.status_code, 404)
self.assertIn("User not found", output.get_data(as_text=True))
user = tests.FakeUser(username="foo")
with tests.user_set(self.app.application, user):
output = self.app.get("/password/change")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
data = {
"old_password": "<PASSWORD>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
# No CSRF token
output = self.app.post("/password/change", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With CSRF - Invalid password format
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/change", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Could not update your password, either user or password "
"could not be checked",
output.get_data(as_text=True),
)
self.test_new_user()
# Remove token of foouser
item = pagure.lib.query.search_user(self.session, username="foouser")
self.assertEqual(item.user, "foouser")
self.assertNotEqual(item.token, None)
self.assertTrue(item.password.startswith("$2$"))
item.token = None
self.session.add(item)
self.session.commit()
user = tests.FakeUser(username="foouser")
with tests.user_set(self.app.application, user):
output = self.app.get("/password/change")
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
data = {
"old_password": "<PASSWORD>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
# No CSRF token
output = self.app.post("/password/change", data=data)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Change password - Pagure</title>",
output.get_data(as_text=True),
)
self.assertIn(
'<form action="/password/change" method="post">',
output.get_data(as_text=True),
)
csrf_token = (
output.get_data(as_text=True)
.split('name="csrf_token" type="hidden" value="')[1]
.split('">')[0]
)
# With CSRF - Incorrect password
data["csrf_token"] = csrf_token
output = self.app.post(
"/password/change", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"Could not update your password, either user or password "
"could not be checked",
output.get_data(as_text=True),
)
# With CSRF - Correct password
data["old_password"] = "<PASSWORD>"
output = self.app.post(
"/password/change", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn("Password changed", output.get_data(as_text=True))
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_logout(self):
""" Test the auth_logout endpoint for local login. """
output = self.app.get("/logout/", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertNotIn(
"You have been logged out", output.get_data(as_text=True)
)
self.assertIn(
'<a class="btn btn-primary" '
'href="/login/?next=http://localhost/">',
output.get_data(as_text=True),
)
user = tests.FakeUser(username="foo")
with tests.user_set(self.app.application, user):
output = self.app.get("/logout/", follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertIn(
"<title>Home - Pagure</title>", output.get_data(as_text=True)
)
self.assertIn(
"You have been logged out", output.get_data(as_text=True)
)
# Due to the way the tests are running we do not actually
# log out
self.assertIn(
'<a class="dropdown-item" href="/logout/?next='
'http://localhost/dashboard/projects">Log Out</a>',
output.get_data(as_text=True),
)
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_settings_admin_session_timedout(self):
""" Test the admin_session_timedout with settings endpoint. """
lifetime = pagure.config.config.get(
"ADMIN_SESSION_LIFETIME", datetime.timedelta(minutes=15)
)
td1 = datetime.timedelta(minutes=1)
# session already expired
user = tests.FakeUser(username="foo")
user.login_time = datetime.datetime.utcnow() - lifetime - td1
with tests.user_set(self.app.application, user):
# not following the redirect because user_set contextmanager
# will run again for the login page and set back the user
# which results in a loop, since admin_session_timedout will
# redirect again for the login page
output = self.app.get("/settings/")
self.assertEqual(output.status_code, 302)
self.assertIn("http://localhost/login/", output.location)
# session did not expire
user.login_time = datetime.datetime.utcnow() - lifetime + td1
with tests.user_set(self.app.application, user):
output = self.app.get("/settings/")
self.assertEqual(output.status_code, 200)
@patch("flask.flash")
def test_admin_session_timedout(self, flash):
""" Test the call to admin_session_timedout. """
lifetime = pagure.config.config.get(
"ADMIN_SESSION_LIFETIME", datetime.timedelta(minutes=15)
)
td1 = datetime.timedelta(minutes=1)
# session already expired
user = tests.FakeUser(username="foo")
user.login_time = datetime.datetime.utcnow() - lifetime - td1
with self.app.application.app_context() as ctx:
ctx.g.session = self.session
ctx.g.fas_user = user
self.assertTrue(pagure.flask_app.admin_session_timedout())
# session did not expire
user.login_time = datetime.datetime.utcnow() - lifetime + td1
with self.app.application.app_context() as ctx:
ctx.g.session = self.session
ctx.g.fas_user = user
self.assertFalse(pagure.flask_app.admin_session_timedout())
@patch.dict("pagure.config.config", {"PAGURE_AUTH": "local"})
def test_force_logout(self):
""" Test forcing logout. """
user = tests.FakeUser(username="foo")
with tests.user_set(self.app.application, user, keep_get_user=True):
# Test that accessing settings works
output = self.app.get("/settings")
self.assertEqual(output.status_code, 200)
# Now logout everywhere
data = {"csrf_token": self.get_csrf()}
output = self.app.post("/settings/forcelogout/", data=data)
self.assertEqual(output.status_code, 302)
self.assertEqual(
output.headers["Location"], "http://localhost/settings"
)
# We should now get redirected to index, because our session became
# invalid
output = self.app.get("/settings")
self.assertEqual(output.headers["Location"], "http://localhost/")
# After changing the login_time to now, the session should again be
# valid
user.login_time = datetime.datetime.utcnow()
output = self.app.get("/")
self.assertEqual(output.status_code, 302)
if __name__ == "__main__":
unittest.main(verbosity=2)
narnia.py | maisammusthafa/narnia | 0 | 6632131 | #!/usr/bin/env python3
""" narnia """
import curses
import curses.textpad
import os
import sys
import time
from narnia.colorstr import add_cstr, init_colors
from narnia.common import Config as c, Globals as g, refresh_windows
from narnia.common import Header, Status
from narnia.download import Download
from narnia.process import start_threads, thread_priority_data, thread_action
def create_downloads():
""" get downloads and create classes """
response = []
prev_downloads = list(g.downloads)
g.downloads = []
def lookup(query):
""" lookup downloads """
for item in prev_downloads:
if item.gid == query:
return prev_downloads.index(item)
return -1
for state in g.download_states:
for item in state:
response.append(item)
for item in response:
index = lookup(item['gid'])
if index != -1:
prev_downloads[index].refresh(item)
g.downloads.append(prev_downloads[index])
else:
g.downloads.append(Download(item))
diff = len(prev_downloads) - len(g.downloads)
if diff != 0:
for item in prev_downloads[-diff:]:
item.win.clear()
item.win.noutrefresh()
g.num_downloads = len(g.downloads)
def key_actions(key):
""" actions based on key input """
def confirm_del():
g.status.win.nodelay(False)
curses.echo(True)
add_cstr(0, 0, '<red.b>confirm deletion? [y/N] </red.b>' + ' ' * (int(g.tty['curr_w']) - 32), g.status.win)
curses.echo(False)
response = g.status.win.getch()
g.status.win.nodelay(True)
g.status.draw(True)
        return response == ord('y')
def nav_up():
# TODO: [BUG] Does not ALWAYS properly pan up
y_pos = g.focused.win.getbegyx()[0]
if g.num_downloads > g.tty['curr_h']:
if g.focused == g.downloads[0]:
g.start_idx = g.num_downloads - (g.tty['curr_h'] - 3)
elif y_pos == 1:
g.start_idx -= 1
g.focused.highlight = 0
g.focused = g.downloads[(g.downloads.index(g.focused) - 1) %
g.num_downloads]
def nav_down():
y_pos = g.focused.win.getbegyx()[0]
if g.focused == g.downloads[-1]:
g.start_idx = 0
elif (y_pos + 3) >= g.tty['curr_h']:
g.start_idx += 1
g.focused.highlight = 0
g.focused = g.downloads[(g.downloads.index(g.focused) + 1) %
g.num_downloads]
def end():
sys.exit()
def pause():
if g.focused.status == 'active' or g.focused.status == 'waiting':
thread_action("c.aria2.pause('{}')".format(g.focused.gid))
elif g.focused.status == 'paused':
thread_action("c.aria2.unpause('{}')".format(g.focused.gid))
def pause_all():
if not c.aria2.tell_active():
thread_action('c.aria2.unpause_all()')
else:
thread_action('c.aria2.pause_all()')
def queue_up():
if g.focused.status == 'waiting':
thread_action("c.aria2.change_position('{}', -1, 'POS_CUR')".format(g.focused.gid))
thread_priority_data() # TODO: Optimize here
def queue_down():
if g.focused.status == 'waiting':
thread_action("c.aria2.change_position('{}', 1, 'POS_CUR')".format(g.focused.gid))
thread_priority_data() # TODO: Optimize here
def purge():
thread_action('c.aria2.purge_download_result()')
def retry():
# TODO: TEST
if g.focused.status == "error":
url = g.focused.data['files'][0]['uris'][0]['uri'].strip()
thread_action("c.aria2.remove_download_result('{}')".format(g.focused.gid))
thread_action("c.aria2.add_uri(['{}'])".format(url))
def delete():
if g.focused.status == 'complete' or g.focused.status == 'removed' or g.focused.status == 'error':
thread_action("c.aria2.remove_download_result('{}')".format(g.focused.gid))
nav_up()
elif confirm_del():
thread_action("c.aria2.remove('{}')".format(g.focused.gid))
g.status.draw(True)
nav_up()
def add():
def tb_validator(char):
def refresh(txt):
g.s_textbox = txt
tb_win.clear()
tb_win.addstr(0, 0, g.s_textbox)
tb_win.refresh()
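            # Key handling below uses raw curses key codes:
            #   10 = Enter, 27 = Esc, 263 = KEY_BACKSPACE, 258-261 = arrow keys,
            #   330 = KEY_DC (delete), 21 = Ctrl-U (clear the line).
            # Returning 7 (Ctrl-G) tells curses.textpad.Textbox.edit() to stop editing,
            # while returning 8 makes it treat the keystroke as a backspace.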
if char == 10:
return 7
elif char == 27:
refresh(g.s_textbox)
g.s_textbox = ''
return 7
elif char == 263:
refresh(g.s_textbox)
g.s_textbox = g.s_textbox[0:-1]
return 8
elif (char >= 258 and char <= 261) or char == 330:
return
elif char == 21:
refresh('')
return
refresh(g.s_textbox + chr(char))
prompt_win = curses.newwin(1, 6, g.tty['curr_h'] - 1, 0)
prompt_win.clear()
add_cstr(0, 0, '<base3.b>add: </base3.b>', prompt_win)
prompt_win.refresh()
g.s_textbox = ''
tb_win = curses.newwin(1, 512, g.tty['curr_h'] - 1, 5)
curses.textpad.Textbox(tb_win, insert_mode=True).edit(tb_validator)
thread_action("c.aria2.add_uri([\"{}\"])".format(g.s_textbox.strip()))
g.status.draw(True)
def none():
pass
actions = {
curses.KEY_RESIZE: refresh_windows,
curses.KEY_UP: nav_up,
c.keys.key_up: nav_up,
curses.KEY_DOWN: nav_down,
c.keys.key_down: nav_down,
c.keys.pause_all: pause_all,
c.keys.pause: pause,
c.keys.add: add,
c.keys.delete: delete,
c.keys.purge: purge,
c.keys.queue_up: queue_up,
c.keys.queue_down: queue_down,
# c.keys.select: select,
# c.keys.expand: expand,
c.keys.retry: retry,
c.keys.quit: end,
}
if g.num_downloads != 0 or \
key == c.keys.add or key == c.keys.quit or key == curses.KEY_RESIZE:
actions.get(key, none)()
def main(screen):
""" main """
curses.curs_set(False)
init_colors()
screen.nodelay(True)
screen.keypad(True)
screen.getch()
g.timer_ui = c.refresh_interval * 100
g.tty['curr_h'], g.tty['curr_w'] = list(map(
int, os.popen('stty size', 'r').read().split()))
g.tty['prev_h'] = g.tty['curr_h']
g.tty['prev_w'] = g.tty['curr_w']
g.header = Header()
g.status = Status()
g.file_status = curses.newwin(1, g.tty['curr_w'], g.tty['curr_h'] - 2, 0)
g.pos_status = curses.newwin(1, 8, g.tty['curr_h'] - 1, g.tty['curr_w'] - 7)
add_cstr(0, 0, g.s_pos, g.pos_status)
g.pos_status.noutrefresh()
refresh_windows()
g.header.draw(True)
g.status.draw(True)
start_threads()
# TODO: [BUG] initial delay even on fast networks
while True:
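        # Main UI loop: sleep 10 ms per tick and redraw once timer_ui reaches
        # c.refresh_interval * 100 ticks; a key press sets the timer to its trigger
        # value so the next tick redraws immediately.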
if g.timer_ui == c.refresh_interval * 100:
g.header.draw(False)
create_downloads()
if g.num_downloads != 0:
if g.focused not in g.downloads:
g.focused = g.downloads[0]
g.focused.highlight = curses.A_REVERSE
for i in range(g.start_idx, min(g.num_downloads, g.tty['curr_h'] - 3) + g.start_idx):
g.downloads[i].draw(i - g.start_idx + 1, False)
file_status_data = '[{}] {}'.format(g.focused.gid, g.focused.name)
if len(file_status_data) >= g.tty['curr_w']:
file_status_data = file_status_data[:-(len(file_status_data) - g.tty['curr_w'] + 3)] + '..'
else:
file_status_data += ' ' * (g.tty['curr_w'] - len(file_status_data) - 1)
add_cstr(0, 0, file_status_data, g.file_status)
g.file_status.noutrefresh()
g.curr_pos = g.downloads.index(g.focused)
if g.num_downloads == 1 or g.curr_pos == 0:
g.s_pos = '[top]'
elif g.curr_pos + 1 == g.num_downloads:
g.s_pos = '[bot]'
else:
g.s_pos = '[{:2}%]'.format(round(((g.curr_pos + 1) / g.num_downloads) * 100))
g.s_pos = '<status.b> {}</status.b>'.format(g.s_pos)
g.pos_status.clear()
add_cstr(0, 0, g.s_pos, g.pos_status)
g.pos_status.noutrefresh()
else:
file_status_data = ' ' * (g.tty['curr_w'] - 1)
add_cstr(0, 0, file_status_data, g.file_status)
g.file_status.noutrefresh()
g.pos_status.clear()
add_cstr(0, 0, g.s_pos, g.pos_status)
g.pos_status.noutrefresh()
g.status.draw(True)
g.tty['prev_h'] = g.tty['curr_h']
g.tty['prev_w'] = g.tty['curr_w']
curses.doupdate()
g.timer_ui = 0
time.sleep(0.01)
g.timer_ui += 1
key_in = screen.getch()
if key_in != -1:
g.timer_ui = c.refresh_interval * 100
key_actions(key_in)
input()
curses.wrapper(main)
api/urls.py | vimeworks/ImpaQto | 0 | 6632132 | from django.conf.urls import include, url
from . import views
urlpatterns = [
#url(r'^',include('coworkersimpaqto.urls')),
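    # Each (?P<username>\w+) group captures the username path segment and is passed
    # to the view function as a keyword argument.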
url(r'^hola_mundo_rest/(?P<username>\w+)/$',views.hola_mundo),
url(r'^coworker/$',views.coworker),
url(r'^coworker/(?P<username>\w+)/$',views.coworker),
url(r'^contrato/$',views.contrato),
url(r'^contrato/(?P<username>\w+)/$',views.contrato),
url(r'^consumo/(?P<username>\w+)/$',views.consumo),
]
test/test_file_header_parser.py | fkie-cad/Codescanner | 5 | 6632133 | <filename>test/test_file_header_parser.py
import numpy
import os
import pytest
import tempfile
import unittest
from file_header_parser import FileHeaderParser
class FileHeaderParserTest(unittest.TestCase):
def setUp(self):
self._test_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
self._test_binary_src = os.path.join(self._test_file_dir, 'testfile')
self._test_binary_exe = os.path.join(self._test_file_dir, 'AdapterTroubleshooter.exe')
def test_not_existing_file(self):
with pytest.raises(IOError):
FileHeaderParser.get_file_header('not.existi.ng')
def test_tilde_directory_path(self):
file_name = 'what/ever.exe'
file_src = '~/' + file_name
home = os.path.expanduser("~")
expected = os.path.join(home, file_name)
try:
FileHeaderParser.get_file_header(file_src)
except IOError as e:
assert str(e) == 'IOError: Source file does not exist: ' + expected
def test_get_elf_file_header(self):
header = FileHeaderParser.get_file_header(self._test_binary_src)
assert header == 'ELF'
def test_get_pe_file_header(self):
temp_dir = tempfile.gettempdir()
bin_src = os.path.join(temp_dir, 'FileHeaderParserTest_test_get_pe_file_header.exe')
self._create_file(bin_src, 0x200, FileHeaderParser.MAGIC_PE_FILE_BYTES)
header = FileHeaderParser.get_file_header(bin_src)
assert header == 'PE'
os.remove(bin_src)
def test_get_undefined_file_header(self):
temp_dir = tempfile.gettempdir()
bin_src = os.path.join(temp_dir, 'FileHeaderParserTest_test_get_pe_file_header.exe')
self._create_file(bin_src, 0x200, bytearray(b'\xba\xd4\xea\xda'))
header = FileHeaderParser.get_file_header(bin_src)
assert header is None
os.remove(bin_src)
def test_get_too_small_file(self):
temp_dir = tempfile.gettempdir()
bin_src = os.path.join(temp_dir, 'FileHeaderParserTest_test_get_too_small_file.exe')
with open(bin_src, 'wb') as f:
f.write(bytearray(b'\x00\x00\x00'))
header = FileHeaderParser.get_file_header(bin_src)
assert header is None
os.remove(bin_src)
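    # Helper: write a file that starts with the given magic bytes followed by random padding.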
def _create_file(self, name, size, magic):
pe_bytes = numpy.random.bytes(size - 4)
pe_bytes = magic + pe_bytes
with open(name, 'wb') as f:
f.write(pe_bytes)
tools/train_al.py | PrateekMunjal/TorchAL | 0 | 6632134 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by <NAME> from official pycls codebase inorder to add the AL functionality
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import numpy as np
import os
import optuna
import sys
import torch
import pickle
import subprocess as sp
import copy
from pycls.core.config import assert_cfg
from pycls.core.config import dump_cfg
from pycls.core.config import custom_dump_cfg
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from pycls.utils.meters import ValMeter
import pycls.core.losses as losses
import pycls.core.model_builder as model_builder
import pycls.core.optimizer as optim
import pycls.utils.benchmark as bu
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.loader as imagenet_loader
from helper.args_util import get_main_args
from helper.args_util import parse_args
from helper.args_util import get_al_args
from helper.subprocess_utils import vaal_sampling_util
from helper.subprocess_utils import active_sampling
from helper.subprocess_utils import test_net_subprocess_call
from helper.subprocess_utils import SWA_subprocess_call
from helper.path_extractor import get_latest_model_path
from helper.path_extractor import get_best_model_path
from helper.path_extractor import update_lset_uset_paths
logger = lu.get_logger(__name__)
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_xvalues = []
plot_it_y_values = []
def plot_arrays(cfg, x_vals, y_vals, x_name, y_name, dataset_name, isDebug=False):
"""Basic utility to plot X vs Y line graphs.
Args:
cfg: Reference to the config yaml
x_vals: values on x-axis
y_vals: values on y-axis
x_name: Label on x-axis
y_name: Label on y-axis
dataset_name: Dataset name.
isDebug (bool, optional): Switch for debug mode. Defaults to False.
"""
if not du.is_master_proc(cfg):
return
import matplotlib.pyplot as plt
temp_name = "{}_vs_{}".format(x_name, y_name)
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.title("Dataset: {}; {}".format(dataset_name, temp_name))
plt.plot(x_vals, y_vals)
if isDebug:
print(f"plot_saved at {cfg.OUT_DIR+temp_name}.png")
if cfg.TRAIN.TRANSFER_EXP:
temp_path = (
os.path.join(
"transfer_experiment",
cfg.MODEL.TRANSFER_MODEL_TYPE
+ "_depth_"
+ str(cfg.MODEL.TRANSFER_MODEL_DEPTH),
)
+ "/"
)
plt.savefig(cfg.OUT_DIR + temp_path + temp_name + ".png")
plt.savefig(cfg.OUT_DIR + temp_name + ".png")
plt.close()
def save_plot_values(
cfg, temp_arrays, temp_names, isParallel=True, saveInTextFormat=False, isDebug=True
):
"""Saves arrays provided in the list in npy format"""
# return if not master process
if isParallel:
if not du.is_master_proc(cfg):
return
for i in range(len(temp_arrays)):
temp_arrays[i] = np.array(temp_arrays[i])
temp_dir = cfg.OUT_DIR
if cfg.TRAIN.TRANSFER_EXP:
temp_dir += (
os.path.join(
"transfer_experiment",
cfg.MODEL.TRANSFER_MODEL_TYPE
+ "_depth_"
+ str(cfg.MODEL.TRANSFER_MODEL_DEPTH),
)
+ "/"
)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if saveInTextFormat:
if isDebug:
print(
f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.txt in text format!!"
)
np.savetxt(temp_dir + temp_names[i] + ".txt", temp_arrays[i], fmt="%d")
else:
if isDebug:
print(
f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.npy in numpy format!!"
)
np.save(temp_dir + temp_names[i] + ".npy", temp_arrays[i])
def is_eval_epoch(cfg, cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or (
cur_epoch + 1
) == cfg.OPTIM.MAX_EPOCH
def log_model_info(model):
"""Logs model info"""
logger.info("Model:\n{}".format(model))
logger.info("Params: {:,}".format(mu.params_count(model)))
logger.info("Flops: {:,}".format(mu.flops_count(model)))
def train_epoch(
train_loader,
model,
loss_fun,
optimizer,
train_meter,
cur_epoch,
cfg,
clf_iter_count,
clf_change_lr_iter,
clf_max_iter,
):
"""Performs one epoch of training."""
if cfg.NUM_GPUS > 1:
train_loader.sampler.set_epoch(cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cfg, cur_epoch)
if cfg.OPTIM.TYPE == "sgd":
optim.set_lr(optimizer, lr)
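    # The per-epoch LR comes from the schedule in pycls.core.optimizer and is only
    # applied explicitly here for the SGD optimizer type.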
# Enable training mode
model.train()
train_meter.iter_tic() # This basically notes the start time in timer class defined in utils/timer.py
len_train_loader = len(train_loader)
for cur_iter, (inputs, labels) in enumerate(train_loader):
# ensuring that inputs are floatTensor as model weights are
inputs = inputs.type(torch.cuda.FloatTensor)
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
        # Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
# Average error and losses across GPUs
# Also this this calls wait method on reductions so we are ensured
# to obtain synchronized results
loss, top1_err = du.scaled_all_reduce(cfg, [loss, top1_err])
# Copy the stats from GPU to CPU (sync point)
loss, top1_err = loss.item(), top1_err.item()
        # Only the master process writes the loss arrays/plots
        if du.is_master_proc(cfg):
            if cur_iter != 0 and cur_iter % 5 == 0:
# because cur_epoch starts with 0
plot_it_xvalues.append((cur_epoch) * len_train_loader + cur_iter)
plot_it_y_values.append(loss)
save_plot_values(
cfg,
[plot_it_xvalues, plot_it_y_values],
["plot_it_xvalues.npy", "plot_it_y_values.npy"],
isDebug=False,
)
plot_arrays(
cfg,
x_vals=plot_it_xvalues,
y_vals=plot_it_y_values,
x_name="Iterations",
y_name="Loss",
dataset_name=cfg.TRAIN.DATASET,
)
# Compute the difference in time now from start time initialized just before this for loop.
train_meter.iter_toc()
train_meter.update_stats(
top1_err=top1_err, loss=loss, lr=lr, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
return loss, clf_iter_count
@torch.no_grad()
def test_epoch(cfg, test_loader, model, test_meter, cur_epoch):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
misclassifications = 0.0
totalSamples = 0.0
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
inputs = inputs.type(torch.cuda.FloatTensor)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err = du.scaled_all_reduce(cfg, [top1_err])
# as above returns a list
top1_err = top1_err[0]
# Copy the errors from GPU to CPU (sync point)
top1_err = top1_err.item()
        # Multiply by the number of GPUs since top1_err is scaled by 1/NUM_GPUS
misclassifications += top1_err * inputs.size(0) * cfg.NUM_GPUS
totalSamples += inputs.size(0) * cfg.NUM_GPUS
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err=top1_err, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
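    # Return the aggregate top-1 error rate (misclassified samples / total samples)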
return misclassifications / totalSamples
def train_model(
best_val_acc,
best_val_epoch,
trainDataset,
valDataset,
dataObj,
cfg,
trial,
isPruning,
):
"""Trains the model."""
global plot_epoch_xvalues
global plot_epoch_yvalues
global plot_it_xvalues
global plot_it_y_values
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_xvalues = []
plot_it_y_values = []
# Build the model (before the loaders to speed up debugging)
model = model_builder.build_model(
cfg, active_sampling=cfg.ACTIVE_LEARNING.ACTIVATE, isDistributed=True
)
# Define the loss function
if cfg.TRAIN.IMBALANCED:
if cfg.TRAIN.DATASET == "IMAGENET":
raise NotImplementedError
temp_lSet, _, _ = dataObj.loadPartitions(
lSetPath=cfg.ACTIVE_LEARNING.LSET_PATH,
uSetPath=cfg.ACTIVE_LEARNING.USET_PATH,
valSetPath=cfg.ACTIVE_LEARNING.VALSET_PATH,
)
temp_weights = dataObj.getClassWeightsFromDataset(
dataset=trainDataset, index_set=temp_lSet, bs=cfg.TRAIN.BATCH_SIZE
)
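        # Per-class weights are computed over the current labeled set and used in a weighted cross-entropy loss.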
# print(f"temp_weights: {temp_weights}")
loss_fun = torch.nn.CrossEntropyLoss(
weight=temp_weights.cuda(torch.cuda.current_device())
)
print("Weighted cross entropy loss chosen as loss function")
print(
"Sum of weights: {} and weights.shape: {}".format(
torch.sum(temp_weights), temp_weights.shape
)
)
else:
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(cfg, model)
print("========OPTIMIZER========")
print("optimizer: {}".format(optimizer))
print("=========================")
start_epoch = 0
# Load initial weights if there are any
if cfg.TRAIN.WEIGHTS:
start_epoch = cu.load_checkpoint(cfg, cfg.TRAIN.WEIGHTS, model, optimizer)
logger.info("=================================")
logger.info("Loaded initial weights from: {}".format(cfg.TRAIN.WEIGHTS))
logger.info("Base LR: {}".format(cfg.OPTIM.BASE_LR))
logger.info("=================================")
# If active learning mode then there has to be some starting point model
if cfg.ACTIVE_LEARNING.ACTIVATE:
if cfg.TRAIN.DATASET in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
print("==================================")
print(
"We are not finetuning over the provided dataset {}".format(
cfg.TRAIN.DATASET
)
)
print(
"So Although we can load best model from path: {} -- but we won't do on CIFAR datsets".format(
cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR
)
)
print("Exiting model loafing function")
print("==================================")
else:
cu.load_checkpoint(cfg, cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR, model)
logger.info("=================================")
logger.info(
"Loaded initial weights from: {}".format(
cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR
)
)
logger.info("Base LR: {}".format(cfg.OPTIM.BASE_LR))
logger.info("=================================")
# check if randAug activated
if cfg.RANDAUG.ACTIVATE:
print("==========================================")
print(
"RandAug activated with N: {} and M: {}".format(
cfg.RANDAUG.N, cfg.RANDAUG.M
)
)
print("==========================================")
# Compute precise time
if start_epoch == 0 and cfg.PREC_TIME.ENABLED:
logger.info("Computing precise time...")
bu.compute_precise_time(model, loss_fun)
nu.reset_bn_stats(model)
# Create data loaders
lSet = []
uSet = []
# handles when we pass cifar/svhn datasets
if cfg.TRAIN.DATASET in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
# get partitions
lSet, uSet, valSet = dataObj.loadPartitions(
lSetPath=cfg.ACTIVE_LEARNING.LSET_PATH,
uSetPath=cfg.ACTIVE_LEARNING.USET_PATH,
valSetPath=cfg.ACTIVE_LEARNING.VALSET_PATH,
)
print("====== Partitions Loaded =======")
print("lSet: {}, uSet:{}, valSet: {}".format(len(lSet), len(uSet), len(valSet)))
print("================================")
train_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=lSet,
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
data=trainDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=True,
)
valSetLoader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=valSet,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=valDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
# Loading test partition
logger.info("==== Loading TestDataset ====")
oldmode = dataObj.eval_mode
dataObj.eval_mode = True
testDataset, n_TestDatapts = dataObj.getDataset(
save_dir=cfg.TEST_DIR, isTrain=False, isDownload=True
)
print("Number of testing datapoints: {}".format(n_TestDatapts))
test_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=None,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=testDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
dataObj.eval_mode = oldmode
elif cfg.TRAIN.DATASET == "IMAGENET":
logger.info("==========================")
logger.info("Trying to load imagenet dataset")
logger.info("==========================")
train_loader, valSetLoader = imagenet_loader.get_data_loaders(cfg)
test_loader = imagenet_loader.construct_test_loader(cfg)
else:
logger.info(f"Dataset {cfg.TRAIN.DATASET} currently not supported")
raise NotImplementedError
# Create meters
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(valSetLoader), cfg)
test_meter = TestMeter(len(test_loader), cfg)
# Perform the training loop
print("Len(train_loader): {}".format(len(train_loader)))
logger.info("Start epoch: {}".format(start_epoch + 1))
val_set_acc = 0.0
temp_best_val_acc = 0.0
temp_best_val_epoch = 0
    # Best checkpoint state (model weights + optimizer) seen so far
best_model_state = None
best_opt_state = None
val_acc_epochs_x = []
val_acc_epochs_y = []
clf_train_iterations = cfg.OPTIM.MAX_EPOCH * int(len(lSet) / cfg.TRAIN.BATCH_SIZE)
clf_change_lr_iter = clf_train_iterations // 25
clf_iter_count = 0
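    # Iteration bookkeeping passed through to train_epoch (for optional iteration-based LR changes)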
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
        # Train for one epoch
train_loss, clf_iter_count = train_epoch(
train_loader,
model,
loss_fun,
optimizer,
train_meter,
cur_epoch,
cfg,
clf_iter_count,
clf_change_lr_iter,
clf_train_iterations,
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
        # Evaluate the model
if is_eval_epoch(cfg, cur_epoch):
            # The original pycls code evaluates on the test loader; here we evaluate on the validation set instead
val_set_err = test_epoch(cfg, valSetLoader, model, val_meter, cur_epoch)
val_set_acc = 100.0 - val_set_err
if temp_best_val_acc < val_set_acc:
temp_best_val_acc = val_set_acc
temp_best_val_epoch = cur_epoch + 1
# Save best model and optimizer state for checkpointing
model.eval()
best_model_state = (
model.module.state_dict()
if cfg.NUM_GPUS > 1
else model.state_dict()
)
best_opt_state = optimizer.state_dict()
model.train()
# log if master process
if du.is_master_proc(cfg):
# as we start from 0 epoch
val_acc_epochs_x.append(cur_epoch + 1)
val_acc_epochs_y.append(val_set_acc)
#######################
# Save a checkpoint
######################
if cfg.TRAIN.DATASET == "IMAGENET" and cu.is_checkpoint_epoch(cfg, cur_epoch):
# named_save_checkpoint saves model with cur_epoch+1 in name
checkpoint_file = cu.named_save_checkpoint(
cfg, "valSet_acc_" + str(val_set_acc), model, optimizer, cur_epoch
)
logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
        # Log arrays and plot training loss vs. epoch (master process only)
if du.is_master_proc(cfg):
plot_epoch_xvalues.append(cur_epoch)
plot_epoch_yvalues.append(train_loss)
save_plot_values(
cfg,
[
plot_epoch_xvalues,
plot_epoch_yvalues,
plot_it_xvalues,
plot_it_y_values,
val_acc_epochs_x,
val_acc_epochs_y,
],
[
"plot_epoch_xvalues.npy",
"plot_epoch_yvalues.npy",
"plot_it_xvalues.npy",
"plot_it_y_values.npy",
"val_acc_epochs_x",
"val_acc_epochs_y",
],
isDebug=False,
)
logger.info("Successfully logged numpy arrays!!")
            # Plot the logged arrays
plot_arrays(
cfg,
x_vals=plot_epoch_xvalues,
y_vals=plot_epoch_yvalues,
x_name="Epochs",
y_name="Loss",
dataset_name=cfg.TRAIN.DATASET,
)
plot_arrays(
cfg,
x_vals=val_acc_epochs_x,
y_vals=val_acc_epochs_y,
x_name="Epochs",
y_name="Validation accuracy",
dataset_name=cfg.TRAIN.DATASET,
)
print("~~~ isPruning Flag: ", isPruning)
print("~~~ isEvalEpoch: ", is_eval_epoch(cfg, cur_epoch))
if (
isPruning
and cur_epoch != 0
and (cur_epoch % 20 == 0)
and is_eval_epoch(cfg, cur_epoch)
):
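            # Report the intermediate validation accuracy to Optuna so its pruner can
            # stop an unpromising trial early.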
print("======================================\n")
print("Inside pruning: -- ", isPruning)
print("======================================\n")
trial.report(val_set_acc, cur_epoch)
if trial.should_prune():
print("======================================\n")
print("Getting pruned!!")
print("======================================\n")
raise optuna.exceptions.TrialPruned()
save_plot_values(
cfg,
[
plot_epoch_xvalues,
plot_epoch_yvalues,
plot_it_xvalues,
plot_it_y_values,
val_acc_epochs_x,
val_acc_epochs_y,
],
[
"plot_epoch_xvalues.npy",
"plot_epoch_yvalues.npy",
"plot_it_xvalues.npy",
"plot_it_y_values.npy",
"val_acc_epochs_x",
"val_acc_epochs_y",
],
)
if du.is_master_proc(cfg):
# update shared variable -- iff process is master process
# if distributed training
if cfg.NUM_GPUS > 1:
best_val_acc.value = temp_best_val_acc
best_val_epoch.value = temp_best_val_epoch
else:
best_val_acc = temp_best_val_acc
best_val_epoch = temp_best_val_epoch
"""
SAVES the best model checkpoint
"""
checkpoint_file = cu.state_save_checkpoint(
cfg=cfg,
info="vlBest_acc_" + str(temp_best_val_acc),
model_state=best_model_state,
optimizer_state=best_opt_state,
epoch=temp_best_val_epoch,
)
logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
if not cfg.NUM_GPUS > 1:
return best_val_acc, best_val_epoch
def single_proc_train(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
):
"""Performs single process training."""
# Setup logging
lu.setup_logging(cfg)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
if cfg.NUM_GPUS > 1:
train_model(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
)
else:
return train_model(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
)
def ensemble_sampling(
args,
cfg,
main_args,
temp_out_dir,
trainDataset,
valDataset,
noAugDataset,
dataObj,
debug=True,
):
temp_cfg = copy.deepcopy(cfg)
if debug:
logger.info("Inside Ensemble sampling function")
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args)
num_ensembles = args.num_ensembles
ENS_DIR_SUFFIX = "ens_model_"
current_device = 0
# train num_ensemble models
print("==========================")
print(f"Num_Ensembles: {num_ensembles}")
print(f"main_args: {main_args}")
print(f"initial temp_out_dir: {temp_out_dir}")
print(f"cfg.ACTIVE_LEARNING.ACTIVATE: {cfg.ACTIVE_LEARNING.ACTIVATE}")
print(f"cfg.ACTIVE_LEARNING.LSET_PATH: {cfg.ACTIVE_LEARNING.LSET_PATH}")
print(f"cfg.ACTIVE_LEARNING.USET_PATH: {cfg.ACTIVE_LEARNING.USET_PATH}")
print(f"cfg.ACTIVE_LEARNING.VALSET_PATH: {cfg.ACTIVE_LEARNING.VALSET_PATH}")
print(f"cfg.ACTIVE_LEARNING.SAMPLING_FN: {cfg.ACTIVE_LEARNING.SAMPLING_FN}")
print("==========================")
model_paths = []
for i in range(num_ensembles):
print("=== Training ensemble [{}/{}] ===".format(i + 1, num_ensembles))
cfg.defrost() # to make cfg mutable
"""
Switch off any regularization if there is any
"""
print(f"Rand_Aug was switched to {cfg.RANDAUG.ACTIVATE}")
if cfg.RANDAUG.ACTIVATE:
cfg.RANDAUG.ACTIVATE = False
print(f"Setting RandAug to --> {cfg.RANDAUG.ACTIVATE}")
print(f"SWA was switched to {cfg.SWA_MODE.ACTIVATE}")
if cfg.SWA_MODE.ACTIVATE:
cfg.SWA_MODE.ACTIVATE = False
print(f"Setting SWA MODE to --> {cfg.SWA_MODE.ACTIVATE}")
cfg.OPTIM.MAX_EPOCH = args.ens_epochs
print(f"Max epochs for training ensemble: {cfg.OPTIM.MAX_EPOCH}")
cfg.RNG_SEED += i
cfg.ACTIVE_LEARNING.BUDGET_SIZE = args.budget_size
cfg.TEST.BATCH_SIZE = args.test_batch_size
cfg.TEST.DATASET = args.dataset
cfg.TRAIN.BATCH_SIZE = args.train_batch_size
cfg.TRAIN.DATASET = args.dataset
cfg.TRAIN.EVAL_PERIOD = args.eval_period
cfg.TRAIN.CHECKPOINT_PERIOD = args.checkpoint_period
cfg.TRAIN.IMBALANCED = args.isimbalanced
cfg.ENSEMBLE.NUM_MODELS = num_ensembles
cfg.ENSEMBLE.MODEL_TYPE = [str(cfg.MODEL.TYPE)]
print(f"====== Ensemble OPTIM LR: {cfg.OPTIM.BASE_LR}=====")
print("=== SEED: {} ===".format(cfg.RNG_SEED))
cfg.OUT_DIR = temp_out_dir + ENS_DIR_SUFFIX + str(i + 1) + "/"
model_paths.append(cfg.OUT_DIR)
print(f"cfg.OUT_DIR: {cfg.OUT_DIR}")
print(f"cfg.ACTIVE_LEARNING.BUDGET_SIZE: {cfg.ACTIVE_LEARNING.BUDGET_SIZE}")
if os.path.exists(cfg.OUT_DIR):
print(
f"Skipping ensemble {i+1} learning as it already exists: {cfg.OUT_DIR}"
)
else:
al_main(cfg, args, trainDataset, valDataset, dataObj, None, isSkipCfg=True)
cfg.defrost()
if debug:
print(f"[Before] model_paths: {model_paths}")
model_paths = [
get_best_model_path(None, [], 0, "", False, directPath=md_path)
for md_path in model_paths
]
if debug:
print(f"[After] model_paths: {model_paths}")
temp_args = [model_paths, num_ensembles, noAugDataset, dataObj, temp_out_dir]
active_sampling(cfg, ensemble_args=temp_args, debug=False)
# Get original CFG back
cfg = copy.deepcopy(temp_cfg)
return 0
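# Illustrative sketch (hypothetical values): ensemble_sampling above trains each
# ensemble member under its own sub-directory of temp_out_dir, named with
# ENS_DIR_SUFFIX ("ens_model_") plus a 1-based index; those directories are later
# resolved to concrete checkpoints via get_best_model_path.
def _example_ensemble_out_dirs(temp_out_dir="results/CIFAR10/20.0/", num_ensembles=3):
    return [temp_out_dir + "ens_model_" + str(i + 1) + "/" for i in range(num_ensembles)]
    # -> ['results/CIFAR10/20.0/ens_model_1/', ..., 'results/CIFAR10/20.0/ens_model_3/']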
# Launches distributed training when cfg.NUM_GPUS > 1, single-process training otherwise
def al_main(
cfg, args, trainDataset, valDataset, dataObj, al_args=None, isSkipCfg=False
):
"""Main function running AL cycles"""
if not isSkipCfg:
# Load config options
cfg.merge_from_file(args.cfg_file)
if al_args is not None:
cfg.merge_from_list(al_args)
assert_cfg()
cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg(cfg)
# Perform training
if cfg.NUM_GPUS > 1:
print("============================")
print("Number of Gpus available for multiprocessing: {}".format(cfg.NUM_GPUS))
print("============================")
best_val_acc, best_val_epoch = mpu.multi_proc_run(
num_proc=cfg.NUM_GPUS,
fun=single_proc_train,
fun_args=(trainDataset, valDataset, dataObj, cfg, 0, True),
)
else:
temp_val_acc = 0.0
temp_val_epoch = 0
# val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg
best_val_acc, best_val_epoch = single_proc_train(
temp_val_acc,
temp_val_epoch,
trainDataset,
valDataset,
dataObj,
cfg,
0,
True,
)
cfg.defrost() # Make cfg mutable for other operations
return best_val_acc, best_val_epoch
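# Illustrative sketch: the al_args / main_args lists merged via cfg.merge_from_list(...)
# follow the yacs convention of a flat list with alternating config keys and values.
# The keys and values below are hypothetical examples, not the exact arguments built by main().
def _example_override_list():
    override = ["OPTIM.MAX_EPOCH", 10, "TRAIN.BATCH_SIZE", 128]
    # Equivalent key/value view of the same flat list:
    return dict(zip(override[0::2], override[1::2]))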
def main(cfg):
# Parse cmd line args
args = parse_args()
best_val_accuracies = []
test_accuracies = []
test_model_paths = [] # For verification purposes
best_val_epochs = []
temp_model_path = ""
al_model_phase = args.al_mode
print("== al_model_phase: {} ==".format(al_model_phase))
al_start = args.init_partition
sampling_fn = args.sampling_fn if al_model_phase else None
dataset_name = args.dataset
if al_model_phase:
al_step = args.step_partition
al_stop = al_start + args.al_max_iter * al_step
data_splits = [round(i, 1) for i in np.arange(al_start, al_stop, al_step)]
else:
data_splits = [args.init_partition]
al_max_iter = len(data_splits)
i_start = 1 if al_max_iter > 1 else 0
# compulsory arguments needed irrespective of active learning or not
main_args = get_main_args(args)
temp_out_dir = ""
directory_specific = "vanilla"
if args.isTransferExp:
print(
f"========= [Running Transfer Experiment; DIRECTORY SPECIFIC SET TO {args.transfer_dir_specific}] ========="
)
directory_specific = args.transfer_dir_specific
else:
if args.swa_mode and args.rand_aug:
directory_specific = "swa_rand_aug"
elif args.swa_mode:
directory_specific = "swa"
elif args.rand_aug:
directory_specific = "rand_aug"
else:
print("========= [NO ADVANCED REGULARIZATION TRICK ACTIVATED] =========")
print(f"Directory_specific: {directory_specific}")
# ONLY SWA MODE
# Construct datasets
from al_utils.data import Data as custom_Data
if args.dataset in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
dataObj = custom_Data(dataset=args.dataset, israndAug=args.rand_aug, args=args)
logger.info("==== Loading trainDataset ====")
trainDataset, n_TrainDatapts = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
# To get reference to data which has no transformations applied
oldmode = dataObj.eval_mode
dataObj.eval_mode = True # To remove any transforms
logger.info("==== Loading valDataset ====")
valDataset, _ = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
logger.info("==== Loading noAugDataset ====")
noAugDataset, _ = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
dataObj.eval_mode = oldmode
elif args.dataset == "IMAGENET":
trainDataset = None
valDataset = None
noAugDataset = None
dataObj = None
# These are constructed later because they depend on the final cfg values, which are not fully set at this point
pass
else:
logger.info(f"{args.dataset} dataset not handled yet.")
raise NotImplementedError
if args.only_swa:
# USAGE: When we only want to run SWA on some model weights
cfg.RANDAUG.ACTIVATE = args.rand_aug
cfg.MODEL.DEPTH = args.model_depth
cfg.MODEL.TYPE = args.model_type
cfg.TRAIN.DATASET = args.dataset
cfg.TRAIN.BATCH_SIZE = args.train_batch_size
cfg.TEST.BATCH_SIZE = args.test_batch_size
# To reflect our cmd arguments and config file changes in cfg
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args)
cfg.ACTIVE_LEARNING.LSET_PATH = args.lSetPath
cfg.ACTIVE_LEARNING.USET_PATH = args.uSetPath
cfg.ACTIVE_LEARNING.VALSET_PATH = args.valSetPath
temp_out_dir = (
args.out_dir
+ dataset_name
+ "/"
+ str(args.init_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
)
logger.info(f"Temp_out_dir: {temp_out_dir}")
if args.only_swa_partition == args.init_partition:
temp_l_SetPath = args.lSetPath
temp_u_SetPath = args.uSetPath
else:
temp_l_SetPath = (
args.out_dir
+ args.dataset
+ "/"
+ str(args.only_swa_partition - args.step_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/lSet.npy"
)
temp_u_SetPath = (
args.out_dir
+ args.dataset
+ "/"
+ str(args.only_swa_partition - args.step_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/uSet.npy"
)
latest_model_path = get_latest_model_path(
dir_path=temp_out_dir + "checkpoints/"
)
print("temp_out_dir: {}".format(temp_out_dir))
print("lsetPath: {}".format(temp_l_SetPath))
print("uSetPath: {}".format(temp_u_SetPath))
print("valSetPath: {}".format(args.valSetPath))
print("latest_model_path: {}".format(latest_model_path))
args.device_ids = np.arange(cfg.NUM_GPUS)
argListSWA = [
args,
latest_model_path,
temp_l_SetPath,
temp_u_SetPath,
temp_out_dir + "checkpoints/",
trainDataset,
noAugDataset,
cfg,
]
SWA_subprocess_call(argListSWA, debug=True)
return
# SWA will be called here if applied
for i in range(i_start, al_max_iter):
if al_model_phase:
# Directory hierarchy -- [out_dir/dataset_name/data_split/seed_id/model_type_depth_<depth>/directory_specific/sampling_fn/]
if data_splits[i] == round(args.init_partition + args.step_partition, 1):
# First time active learning
al_args, temp_out_dir = get_al_args(
args, data_splits, i, directory_specific, alStart=True
)
else:
al_args, temp_out_dir = get_al_args(
args, data_splits, i, directory_specific, alStart=False
)
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args + al_args)
assert_cfg()
# Should we do active sampling or not?
# If lSet, uSet and activeSet found in their target directories
# then we skip sampling part for that particular iteration
skip_sampling = True
check_path = (
args.out_dir
+ args.dataset
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/"
)
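# For illustration only (hypothetical argument values): with out_dir="results/",
# dataset="CIFAR10", data_splits[i]=20.0, seed_id=1, model_type="resnet",
# model_depth=18, directory_specific="vanilla" and sampling_fn="dbal",
# check_path resolves to "results/CIFAR10/20.0/1/resnet_depth_18/vanilla/dbal/".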
print("==============================")
print(f"check_path: {check_path}")
print("==============================")
req_fnames = ["lSet", "uSet", "activeSet"]
for fname in req_fnames:
if os.path.exists(check_path + fname + ".npy") and os.path.exists(
check_path + fname + ".txt"
):
continue
else:
skip_sampling = False
break
if not skip_sampling:
# do active sampling
if cfg.ACTIVE_LEARNING.SAMPLING_FN in ["vaal", "vaal_minus_disc"]:
temp_old_im_size = cfg.TRAIN.IM_SIZE
if cfg.TRAIN.DATASET == "IMAGENET":
cfg.TRAIN.IM_SIZE = args.vaal_im_size
vaal_sampling_util(cfg, dataObj, debug=True)
if cfg.TRAIN.DATASET == "IMAGENET":
cfg.TRAIN.IM_SIZE = temp_old_im_size
elif cfg.ACTIVE_LEARNING.SAMPLING_FN.startswith("ensemble"):
ensemble_sampling(
args,
cfg,
main_args,
temp_out_dir,
trainDataset,
valDataset,
noAugDataset,
dataObj,
debug=True,
)
else:
active_sampling(cfg, debug=True)
else:
print(
"Sampling Skipped as index sets exists at path: {}".format(
check_path
)
)
# update lSetPath, uSetPath
al_args = update_lset_uset_paths(
al_args,
args.out_dir
+ args.dataset
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/lSet.npy",
args.out_dir
+ args.dataset
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/uSet.npy",
)
else:
# base classifier phase
temp_out_dir = (
args.out_dir
+ dataset_name
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
)
al_args = [
"ACTIVE_LEARNING.LSET_PATH",
args.lSetPath,
"ACTIVE_LEARNING.USET_PATH",
args.uSetPath,
"OUT_DIR",
temp_out_dir,
"ACTIVE_LEARNING.VALSET_PATH",
args.valSetPath,
"ACTIVE_LEARNING.ACTIVATE",
args.al_mode,
"ACTIVE_LEARNING.DATA_SPLIT",
args.init_partition,
"DIR_SPECIFIC",
directory_specific,
]
# Make out_directory for saving results later
os.makedirs(temp_out_dir, exist_ok=True)
temp_al_args = al_args
al_args = main_args + al_args
print("========[CMD ARGUMNETS]=======")
print("al_args: {}".format(al_args))
print("Using data_splits: {}".format(data_splits))
print("==============================")
print("============================")
print("Running AL iteration #{}".format(i))
print("~~~~temp_out_dir: {}".format(temp_out_dir))
if cfg.ACTIVE_LEARNING.SAMPLING_FN.startswith("ensemble"):
cfg.OUT_DIR = temp_out_dir
# This runs after the active-sampling step for this iteration
if cfg.ACTIVE_LEARNING.ACTIVATE and cfg.ACTIVE_LEARNING.NOISY_ORACLE > 0.0:
if cfg.TRAIN.DATASET == "IMAGENET":
raise NotImplementedError
print("============= ADDING NOISE =============")
noise_percent = cfg.ACTIVE_LEARNING.NOISY_ORACLE
# temp_data_split = cfg.ACTIVE_LEARNING.DATA_SPLIT
activeSet = np.load(
os.path.join(cfg.OUT_DIR, "activeSet.npy"), allow_pickle=True
)
noise_idx = np.arange(start=0, stop=len(activeSet))
np.random.shuffle(noise_idx)
noise_idx = noise_idx[0 : int(noise_percent * len(activeSet))]
print("len(noise_idx): ", len(noise_idx))
active_noise_idx = activeSet[noise_idx]
for idx in active_noise_idx:
trainDataset.targets[idx] = np.random.randint(
0, cfg.MODEL.NUM_CLASSES, 1
)[0]
print("=============== DONE ================")
if not cfg.TRAIN.TRANSFER_EXP and os.path.exists(
temp_out_dir + "checkpoints/"
):
print(
f"=== Skipped Learning as path: [{temp_out_dir}checkpoints/] exists...==="
)
best_val_acc = 0
best_val_epoch = 0
print("temp_out_dir: ", temp_out_dir)
cfg.merge_from_file(os.path.join(temp_out_dir, "config.yaml"))
cfg.PORT = args.port
else:
best_val_acc, best_val_epoch = al_main(
cfg, args, trainDataset, valDataset, dataObj, al_args
)
if cfg.TRAIN.TRANSFER_EXP:
temp_out_dir = cu.get_checkpoint_dir_wo_checkpoint(cfg) + "/"
print("temp_out_dir: {}".format(temp_out_dir))
latest_model_path = get_latest_model_path(dir_path=temp_out_dir)
else:
latest_model_path = get_latest_model_path(
dir_path=temp_out_dir + "checkpoints/"
)
print("temp_out_dir: {}".format(temp_out_dir))
if best_val_epoch == 0 and best_val_acc == 0:
model_info = os.path.split(latest_model_path)[1]
best_val_epoch = int(model_info.split("_")[-1].split(".")[0])
best_val_acc = float(model_info.split("_")[2])
print("latest_model_path: {}".format(latest_model_path))
## RUN SWA
temp_l_SetPath = temp_al_args[1]
temp_u_SetPath = temp_al_args[3]
print(
"Best Val Acc: {}, Best Val Epoch: {}".format(
best_val_acc, best_val_epoch + 1
)
)
print("============================")
best_val_accuracies.append(best_val_acc)
best_val_epochs.append(best_val_epoch)
if args.swa_mode and args.swa_freq > 0:
# This means we want to run SWA else we won't
args.device_ids = np.arange(cfg.NUM_GPUS)
swa_temp_out_dir = temp_out_dir # + "checkpoints/"
if swa_temp_out_dir.find("checkpoints") != -1:
# remove checkpoints directory
swa_temp_out_dir = swa_temp_out_dir[
: swa_temp_out_dir.index("checkpoints")
]
swa_temp_out_dir = os.path.join(swa_temp_out_dir, "checkpoints/")
argListSWA = [
args,
latest_model_path,
temp_l_SetPath,
temp_u_SetPath,
swa_temp_out_dir,
trainDataset,
noAugDataset,
cfg,
]
print("RUNNING SWA FROM HERE ..........")
print("Check data paths")
print("LsetPath: ", cfg.ACTIVE_LEARNING.LSET_PATH)
print("UsetPath: ", cfg.ACTIVE_LEARNING.USET_PATH)
SWA_subprocess_call(argListSWA, debug=True)
print("=====BEST MODEL=====")
print("temp_out_dir: {}".format(temp_out_dir))
temp_al_start = (i == 0)
if i == 0:
best_model_path = get_best_model_path(
args, data_splits, i, directory_specific, temp_al_start
)
else:
best_model_path = get_best_model_path(
args,
data_splits,
i,
directory_specific,
temp_al_start,
directPath=temp_out_dir,
)
print("best_model_path: {}".format(best_model_path))
if cfg.TRAIN.TRANSFER_EXP:
import copy
temp_cfg = copy.deepcopy(cfg)
temp_cfg.OUT_DIR = cu.get_checkpoint_dir_wo_checkpoint(cfg)
print("cfg.OUT_DIR : {}".format(cfg.OUT_DIR))
print("temp_cfg.OUT_DIR: {}".format(temp_cfg.OUT_DIR))
temp_cfg.freeze()
custom_dump_cfg(temp_cfg=temp_cfg)
temp_cfg.defrost()
##test model via subprocess
temp_test_acc = test_net_subprocess_call(
temp_out_dir, best_model_path, debug=True
)
test_accuracies.append(temp_test_acc)
test_model_paths.append(best_model_path)
if al_max_iter > 1:
for i in range(len(data_splits) - 1):
# print("For {}% split, best val accuracy: {} achieved at epoch: {}"\
# .format(data_splits[i+1], best_val_accuracies[i], best_val_epochs[i]))
print(
"For {}% split, test accuracy: {:.3f} where model was loaded from path: {}".format(
data_splits[i + 1], test_accuracies[i], test_model_paths[i]
)
)
if __name__ == "__main__":
from pycls.core.config import cfg
main(cfg)
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by <NAME> from the official pycls codebase in order to add the AL functionality
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import numpy as np
import os
import optuna
import sys
import torch
import pickle
import subprocess as sp
import copy
from pycls.core.config import assert_cfg
from pycls.core.config import dump_cfg
from pycls.core.config import custom_dump_cfg
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from pycls.utils.meters import ValMeter
import pycls.core.losses as losses
import pycls.core.model_builder as model_builder
import pycls.core.optimizer as optim
import pycls.utils.benchmark as bu
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.loader as imagenet_loader
from helper.args_util import get_main_args
from helper.args_util import parse_args
from helper.args_util import get_al_args
from helper.subprocess_utils import vaal_sampling_util
from helper.subprocess_utils import active_sampling
from helper.subprocess_utils import test_net_subprocess_call
from helper.subprocess_utils import SWA_subprocess_call
from helper.path_extractor import get_latest_model_path
from helper.path_extractor import get_best_model_path
from helper.path_extractor import update_lset_uset_paths
logger = lu.get_logger(__name__)
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_xvalues = []
plot_it_y_values = []
def plot_arrays(cfg, x_vals, y_vals, x_name, y_name, dataset_name, isDebug=False):
"""Basic utility to plot X vs Y line graphs.
Args:
cfg: Reference to the config yaml
x_vals: values on x-axis
y_vals: values on y-axis
x_name: Label on x-axis
y_name: Label on y-axis
dataset_name: Dataset name.
isDebug (bool, optional): Switch for debug mode. Defaults to False.
"""
if not du.is_master_proc(cfg):
return
import matplotlib.pyplot as plt
temp_name = "{}_vs_{}".format(x_name, y_name)
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.title("Dataset: {}; {}".format(dataset_name, temp_name))
plt.plot(x_vals, y_vals)
if isDebug:
print(f"plot_saved at {cfg.OUT_DIR+temp_name}.png")
if cfg.TRAIN.TRANSFER_EXP:
temp_path = (
os.path.join(
"transfer_experiment",
cfg.MODEL.TRANSFER_MODEL_TYPE
+ "_depth_"
+ str(cfg.MODEL.TRANSFER_MODEL_DEPTH),
)
+ "/"
)
plt.savefig(cfg.OUT_DIR + temp_path + temp_name + ".png")
plt.savefig(cfg.OUT_DIR + temp_name + ".png")
plt.close()
def save_plot_values(
cfg, temp_arrays, temp_names, isParallel=True, saveInTextFormat=False, isDebug=True
):
"""Saves arrays provided in the list in npy format"""
# return if not master process
if isParallel:
if not du.is_master_proc(cfg):
return
for i in range(len(temp_arrays)):
temp_arrays[i] = np.array(temp_arrays[i])
temp_dir = cfg.OUT_DIR
if cfg.TRAIN.TRANSFER_EXP:
temp_dir += (
os.path.join(
"transfer_experiment",
cfg.MODEL.TRANSFER_MODEL_TYPE
+ "_depth_"
+ str(cfg.MODEL.TRANSFER_MODEL_DEPTH),
)
+ "/"
)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if saveInTextFormat:
if isDebug:
print(
f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.txt in text format!!"
)
np.savetxt(temp_dir + temp_names[i] + ".txt", temp_arrays[i], fmt="%d")
else:
if isDebug:
print(
f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.npy in numpy format!!"
)
np.save(temp_dir + temp_names[i] + ".npy", temp_arrays[i])
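# Note for callers (derived from the code above): save_plot_values appends ".npy" or
# ".txt" to every entry of temp_names itself, so names that already carry the suffix
# (e.g. "plot_epoch_xvalues.npy") are written as "plot_epoch_xvalues.npy.npy", while
# suffix-free names (e.g. "val_acc_epochs_x") are written as "val_acc_epochs_x.npy".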
def is_eval_epoch(cfg, cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or (
cur_epoch + 1
) == cfg.OPTIM.MAX_EPOCH
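# Illustrative sketch (hypothetical values): with cfg.TRAIN.EVAL_PERIOD = 5 and
# cfg.OPTIM.MAX_EPOCH = 12, is_eval_epoch is True for cur_epoch 4, 9 and 11, i.e. the
# model is evaluated every 5th epoch and always at the final epoch.
def _example_eval_epochs(eval_period=5, max_epoch=12):
    return [e for e in range(max_epoch) if (e + 1) % eval_period == 0 or (e + 1) == max_epoch]
    # -> [4, 9, 11]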
def log_model_info(model):
"""Logs model info"""
logger.info("Model:\n{}".format(model))
logger.info("Params: {:,}".format(mu.params_count(model)))
logger.info("Flops: {:,}".format(mu.flops_count(model)))
def train_epoch(
train_loader,
model,
loss_fun,
optimizer,
train_meter,
cur_epoch,
cfg,
clf_iter_count,
clf_change_lr_iter,
clf_max_iter,
):
"""Performs one epoch of training."""
if cfg.NUM_GPUS > 1:
train_loader.sampler.set_epoch(cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cfg, cur_epoch)
if cfg.OPTIM.TYPE == "sgd":
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic() # This basically notes the start time in timer class defined in utils/timer.py
len_train_loader = len(train_loader)
for cur_iter, (inputs, labels) in enumerate(train_loader):
# ensuring that inputs are floatTensor as model weights are
inputs = inputs.type(torch.cuda.FloatTensor)
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
# Average error and losses across GPUs
# Also, this calls the wait method on the reductions, so we are ensured
# to obtain synchronized results
loss, top1_err = du.scaled_all_reduce(cfg, [loss, top1_err])
# Copy the stats from GPU to CPU (sync point)
loss, top1_err = loss.item(), top1_err.item()
# #ONLY MASTER PROCESS SHOULD WRITE TO TENSORBOARD
if du.is_master_proc(cfg):
if cur_iter != 0 and cur_iter % 5 == 0:
# because cur_epoch starts with 0
plot_it_xvalues.append((cur_epoch) * len_train_loader + cur_iter)
plot_it_y_values.append(loss)
save_plot_values(
cfg,
[plot_it_xvalues, plot_it_y_values],
["plot_it_xvalues.npy", "plot_it_y_values.npy"],
isDebug=False,
)
plot_arrays(
cfg,
x_vals=plot_it_xvalues,
y_vals=plot_it_y_values,
x_name="Iterations",
y_name="Loss",
dataset_name=cfg.TRAIN.DATASET,
)
# Compute the difference in time now from start time initialized just before this for loop.
train_meter.iter_toc()
train_meter.update_stats(
top1_err=top1_err, loss=loss, lr=lr, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
return loss, clf_iter_count
@torch.no_grad()
def test_epoch(cfg, test_loader, model, test_meter, cur_epoch):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
misclassifications = 0.0
totalSamples = 0.0
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
inputs = inputs.type(torch.cuda.FloatTensor)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err = du.scaled_all_reduce(cfg, [top1_err])
# as above returns a list
top1_err = top1_err[0]
# Copy the errors from GPU to CPU (sync point)
top1_err = top1_err.item()
# Multiply by the number of GPUs, as top1_err is scaled by 1/NUM_GPUS
misclassifications += top1_err * inputs.size(0) * cfg.NUM_GPUS
totalSamples += inputs.size(0) * cfg.NUM_GPUS
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err=top1_err, mb_size=inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
return misclassifications / totalSamples
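# Worked example (hypothetical numbers): test_epoch accumulates the per-batch top-1
# error (a percentage) weighted by the effective batch size and divides by the total
# number of samples, so the returned value is the overall top-1 error in percent.
def _example_error_aggregation():
    batch_errors = [(10.0, 256), (20.0, 256), (15.0, 128)]  # (top1_err in %, batch size)
    misclassifications = sum(err * bs for err, bs in batch_errors)
    total_samples = sum(bs for _, bs in batch_errors)
    return misclassifications / total_samples  # -> 15.0 (overall top-1 error in %)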
def train_model(
best_val_acc,
best_val_epoch,
trainDataset,
valDataset,
dataObj,
cfg,
trial,
isPruning,
):
"""Trains the model."""
global plot_epoch_xvalues
global plot_epoch_yvalues
global plot_it_xvalues
global plot_it_y_values
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_xvalues = []
plot_it_y_values = []
# Build the model (before the loaders to speed up debugging)
model = model_builder.build_model(
cfg, active_sampling=cfg.ACTIVE_LEARNING.ACTIVATE, isDistributed=True
)
# Define the loss function
if cfg.TRAIN.IMBALANCED:
if cfg.TRAIN.DATASET == "IMAGENET":
raise NotImplementedError
temp_lSet, _, _ = dataObj.loadPartitions(
lSetPath=cfg.ACTIVE_LEARNING.LSET_PATH,
uSetPath=cfg.ACTIVE_LEARNING.USET_PATH,
valSetPath=cfg.ACTIVE_LEARNING.VALSET_PATH,
)
temp_weights = dataObj.getClassWeightsFromDataset(
dataset=trainDataset, index_set=temp_lSet, bs=cfg.TRAIN.BATCH_SIZE
)
# print(f"temp_weights: {temp_weights}")
loss_fun = torch.nn.CrossEntropyLoss(
weight=temp_weights.cuda(torch.cuda.current_device())
)
print("Weighted cross entropy loss chosen as loss function")
print(
"Sum of weights: {} and weights.shape: {}".format(
torch.sum(temp_weights), temp_weights.shape
)
)
else:
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(cfg, model)
print("========OPTIMIZER========")
print("optimizer: {}".format(optimizer))
print("=========================")
start_epoch = 0
# Load initial weights if there are any
if cfg.TRAIN.WEIGHTS:
start_epoch = cu.load_checkpoint(cfg, cfg.TRAIN.WEIGHTS, model, optimizer)
logger.info("=================================")
logger.info("Loaded initial weights from: {}".format(cfg.TRAIN.WEIGHTS))
logger.info("Base LR: {}".format(cfg.OPTIM.BASE_LR))
logger.info("=================================")
# If active learning mode then there has to be some starting point model
if cfg.ACTIVE_LEARNING.ACTIVATE:
if cfg.TRAIN.DATASET in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
print("==================================")
print(
"We are not finetuning over the provided dataset {}".format(
cfg.TRAIN.DATASET
)
)
print(
"So Although we can load best model from path: {} -- but we won't do on CIFAR datsets".format(
cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR
)
)
print("Exiting model loafing function")
print("==================================")
else:
cu.load_checkpoint(cfg, cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR, model)
logger.info("=================================")
logger.info(
"Loaded initial weights from: {}".format(
cfg.ACTIVE_LEARNING.MODEL_LOAD_DIR
)
)
logger.info("Base LR: {}".format(cfg.OPTIM.BASE_LR))
logger.info("=================================")
# check if randAug activated
if cfg.RANDAUG.ACTIVATE:
print("==========================================")
print(
"RandAug activated with N: {} and M: {}".format(
cfg.RANDAUG.N, cfg.RANDAUG.M
)
)
print("==========================================")
# Compute precise time
if start_epoch == 0 and cfg.PREC_TIME.ENABLED:
logger.info("Computing precise time...")
bu.compute_precise_time(model, loss_fun)
nu.reset_bn_stats(model)
# Create data loaders
lSet = []
uSet = []
# handles when we pass cifar/svhn datasets
if cfg.TRAIN.DATASET in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
# get partitions
lSet, uSet, valSet = dataObj.loadPartitions(
lSetPath=cfg.ACTIVE_LEARNING.LSET_PATH,
uSetPath=cfg.ACTIVE_LEARNING.USET_PATH,
valSetPath=cfg.ACTIVE_LEARNING.VALSET_PATH,
)
print("====== Partitions Loaded =======")
print("lSet: {}, uSet:{}, valSet: {}".format(len(lSet), len(uSet), len(valSet)))
print("================================")
train_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=lSet,
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
data=trainDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=True,
)
valSetLoader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=valSet,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=valDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
# Loading test partition
logger.info("==== Loading TestDataset ====")
oldmode = dataObj.eval_mode
dataObj.eval_mode = True
testDataset, n_TestDatapts = dataObj.getDataset(
save_dir=cfg.TEST_DIR, isTrain=False, isDownload=True
)
print("Number of testing datapoints: {}".format(n_TestDatapts))
test_loader = dataObj.getDistributedIndexesDataLoader(
cfg=cfg,
indexes=None,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
data=testDataset,
n_worker=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
allowRepeat=False,
)
dataObj.eval_mode = oldmode
elif cfg.TRAIN.DATASET == "IMAGENET":
logger.info("==========================")
logger.info("Trying to load imagenet dataset")
logger.info("==========================")
train_loader, valSetLoader = imagenet_loader.get_data_loaders(cfg)
test_loader = imagenet_loader.construct_test_loader(cfg)
else:
logger.info(f"Dataset {cfg.TRAIN.DATASET} currently not supported")
raise NotImplementedError
# Create meters
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(valSetLoader), cfg)
test_meter = TestMeter(len(test_loader), cfg)
# Perform the training loop
print("Len(train_loader): {}".format(len(train_loader)))
logger.info("Start epoch: {}".format(start_epoch + 1))
val_set_acc = 0.0
temp_best_val_acc = 0.0
temp_best_val_epoch = 0
##best checkpoint states
best_model_state = None
best_opt_state = None
val_acc_epochs_x = []
val_acc_epochs_y = []
clf_train_iterations = cfg.OPTIM.MAX_EPOCH * int(len(lSet) / cfg.TRAIN.BATCH_SIZE)
clf_change_lr_iter = clf_train_iterations // 25
clf_iter_count = 0
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
# # Train for one epoch
train_loss, clf_iter_count = train_epoch(
train_loader,
model,
loss_fun,
optimizer,
train_meter,
cur_epoch,
cfg,
clf_iter_count,
clf_change_lr_iter,
clf_train_iterations,
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# # Evaluate the model
if is_eval_epoch(cfg, cur_epoch):
# Original code passes on testLoader but we want to compute on val Set
val_set_err = test_epoch(cfg, valSetLoader, model, val_meter, cur_epoch)
val_set_acc = 100.0 - val_set_err
if temp_best_val_acc < val_set_acc:
temp_best_val_acc = val_set_acc
temp_best_val_epoch = cur_epoch + 1
# Save best model and optimizer state for checkpointing
model.eval()
best_model_state = (
model.module.state_dict()
if cfg.NUM_GPUS > 1
else model.state_dict()
)
best_opt_state = optimizer.state_dict()
model.train()
# log if master process
if du.is_master_proc(cfg):
# cur_epoch is 0-based, so record validation accuracy against 1-based epoch numbers
val_acc_epochs_x.append(cur_epoch + 1)
val_acc_epochs_y.append(val_set_acc)
#######################
# Save a checkpoint
######################
if cfg.TRAIN.DATASET == "IMAGENET" and cu.is_checkpoint_epoch(cfg, cur_epoch):
# named_save_checkpoint saves model with cur_epoch+1 in name
checkpoint_file = cu.named_save_checkpoint(
cfg, "valSet_acc_" + str(val_set_acc), model, optimizer, cur_epoch
)
logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
# ##Tensorboard for loss vs epoch
if du.is_master_proc(cfg):
plot_epoch_xvalues.append(cur_epoch)
plot_epoch_yvalues.append(train_loss)
save_plot_values(
cfg,
[
plot_epoch_xvalues,
plot_epoch_yvalues,
plot_it_xvalues,
plot_it_y_values,
val_acc_epochs_x,
val_acc_epochs_y,
],
[
"plot_epoch_xvalues.npy",
"plot_epoch_yvalues.npy",
"plot_it_xvalues.npy",
"plot_it_y_values.npy",
"val_acc_epochs_x",
"val_acc_epochs_y",
],
isDebug=False,
)
logger.info("Successfully logged numpy arrays!!")
##PLOT arrays
plot_arrays(
cfg,
x_vals=plot_epoch_xvalues,
y_vals=plot_epoch_yvalues,
x_name="Epochs",
y_name="Loss",
dataset_name=cfg.TRAIN.DATASET,
)
plot_arrays(
cfg,
x_vals=val_acc_epochs_x,
y_vals=val_acc_epochs_y,
x_name="Epochs",
y_name="Validation accuracy",
dataset_name=cfg.TRAIN.DATASET,
)
print("~~~ isPruning Flag: ", isPruning)
print("~~~ isEvalEpoch: ", is_eval_epoch(cfg, cur_epoch))
if (
isPruning
and cur_epoch != 0
and (cur_epoch % 20 == 0)
and is_eval_epoch(cfg, cur_epoch)
):
print("======================================\n")
print("Inside pruning: -- ", isPruning)
print("======================================\n")
trial.report(val_set_acc, cur_epoch)
if trial.should_prune():
print("======================================\n")
print("Getting pruned!!")
print("======================================\n")
raise optuna.exceptions.TrialPruned()
save_plot_values(
cfg,
[
plot_epoch_xvalues,
plot_epoch_yvalues,
plot_it_xvalues,
plot_it_y_values,
val_acc_epochs_x,
val_acc_epochs_y,
],
[
"plot_epoch_xvalues.npy",
"plot_epoch_yvalues.npy",
"plot_it_xvalues.npy",
"plot_it_y_values.npy",
"val_acc_epochs_x",
"val_acc_epochs_y",
],
)
if du.is_master_proc(cfg):
# update shared variable -- iff process is master process
# if distributed training
if cfg.NUM_GPUS > 1:
best_val_acc.value = temp_best_val_acc
best_val_epoch.value = temp_best_val_epoch
else:
best_val_acc = temp_best_val_acc
best_val_epoch = temp_best_val_epoch
"""
SAVES the best model checkpoint
"""
checkpoint_file = cu.state_save_checkpoint(
cfg=cfg,
info="vlBest_acc_" + str(temp_best_val_acc),
model_state=best_model_state,
optimizer_state=best_opt_state,
epoch=temp_best_val_epoch,
)
logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
if not cfg.NUM_GPUS > 1:
return best_val_acc, best_val_epoch
def single_proc_train(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
):
"""Performs single process training."""
# Setup logging
lu.setup_logging(cfg)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
if cfg.NUM_GPUS > 1:
train_model(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
)
else:
return train_model(
val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg, trial, isPruning
)
def ensemble_sampling(
args,
cfg,
main_args,
temp_out_dir,
trainDataset,
valDataset,
noAugDataset,
dataObj,
debug=True,
):
temp_cfg = copy.deepcopy(cfg)
if debug:
logger.info("Inside Ensemble sampling function")
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args)
num_ensembles = args.num_ensembles
ENS_DIR_SUFFIX = "ens_model_"
current_device = 0
# train num_ensemble models
print("==========================")
print(f"Num_Ensembles: {num_ensembles}")
print(f"main_args: {main_args}")
print(f"initial temp_out_dir: {temp_out_dir}")
print(f"cfg.ACTIVE_LEARNING.ACTIVATE: {cfg.ACTIVE_LEARNING.ACTIVATE}")
print(f"cfg.ACTIVE_LEARNING.LSET_PATH: {cfg.ACTIVE_LEARNING.LSET_PATH}")
print(f"cfg.ACTIVE_LEARNING.USET_PATH: {cfg.ACTIVE_LEARNING.USET_PATH}")
print(f"cfg.ACTIVE_LEARNING.VALSET_PATH: {cfg.ACTIVE_LEARNING.VALSET_PATH}")
print(f"cfg.ACTIVE_LEARNING.SAMPLING_FN: {cfg.ACTIVE_LEARNING.SAMPLING_FN}")
print("==========================")
model_paths = []
for i in range(num_ensembles):
print("=== Training ensemble [{}/{}] ===".format(i + 1, num_ensembles))
cfg.defrost() # to make cfg mutable
"""
Switch off any regularization if there is any
"""
print(f"Rand_Aug was switched to {cfg.RANDAUG.ACTIVATE}")
if cfg.RANDAUG.ACTIVATE:
cfg.RANDAUG.ACTIVATE = False
print(f"Setting RandAug to --> {cfg.RANDAUG.ACTIVATE}")
print(f"SWA was switched to {cfg.SWA_MODE.ACTIVATE}")
if cfg.SWA_MODE.ACTIVATE:
cfg.SWA_MODE.ACTIVATE = False
print(f"Setting SWA MODE to --> {cfg.SWA_MODE.ACTIVATE}")
cfg.OPTIM.MAX_EPOCH = args.ens_epochs
print(f"Max epochs for training ensemble: {cfg.OPTIM.MAX_EPOCH}")
cfg.RNG_SEED += i
cfg.ACTIVE_LEARNING.BUDGET_SIZE = args.budget_size
cfg.TEST.BATCH_SIZE = args.test_batch_size
cfg.TEST.DATASET = args.dataset
cfg.TRAIN.BATCH_SIZE = args.train_batch_size
cfg.TRAIN.DATASET = args.dataset
cfg.TRAIN.EVAL_PERIOD = args.eval_period
cfg.TRAIN.CHECKPOINT_PERIOD = args.checkpoint_period
cfg.TRAIN.IMBALANCED = args.isimbalanced
cfg.ENSEMBLE.NUM_MODELS = num_ensembles
cfg.ENSEMBLE.MODEL_TYPE = [str(cfg.MODEL.TYPE)]
print(f"====== Ensemble OPTIM LR: {cfg.OPTIM.BASE_LR}=====")
print("=== SEED: {} ===".format(cfg.RNG_SEED))
cfg.OUT_DIR = temp_out_dir + ENS_DIR_SUFFIX + str(i + 1) + "/"
model_paths.append(cfg.OUT_DIR)
print(f"cfg.OUT_DIR: {cfg.OUT_DIR}")
print(f"cfg.ACTIVE_LEARNING.BUDGET_SIZE: {cfg.ACTIVE_LEARNING.BUDGET_SIZE}")
if os.path.exists(cfg.OUT_DIR):
print(
f"Skipping ensemble {i+1} learning as it already exists: {cfg.OUT_DIR}"
)
else:
al_main(cfg, args, trainDataset, valDataset, dataObj, None, isSkipCfg=True)
cfg.defrost()
if debug:
print(f"[Before] model_paths: {model_paths}")
model_paths = [
get_best_model_path(None, [], 0, "", False, directPath=md_path)
for md_path in model_paths
]
if debug:
print(f"[After] model_paths: {model_paths}")
temp_args = [model_paths, num_ensembles, noAugDataset, dataObj, temp_out_dir]
active_sampling(cfg, ensemble_args=temp_args, debug=False)
# Get original CFG back
cfg = copy.deepcopy(temp_cfg)
return 0
# Launches distributed training when cfg.NUM_GPUS > 1, single-process training otherwise
def al_main(
cfg, args, trainDataset, valDataset, dataObj, al_args=None, isSkipCfg=False
):
"""Main function running AL cycles"""
if not isSkipCfg:
# Load config options
cfg.merge_from_file(args.cfg_file)
if al_args is not None:
cfg.merge_from_list(al_args)
assert_cfg()
cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg(cfg)
# Perform training
if cfg.NUM_GPUS > 1:
print("============================")
print("Number of Gpus available for multiprocessing: {}".format(cfg.NUM_GPUS))
print("============================")
best_val_acc, best_val_epoch = mpu.multi_proc_run(
num_proc=cfg.NUM_GPUS,
fun=single_proc_train,
fun_args=(trainDataset, valDataset, dataObj, cfg, 0, True),
)
else:
temp_val_acc = 0.0
temp_val_epoch = 0
# val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg
best_val_acc, best_val_epoch = single_proc_train(
temp_val_acc,
temp_val_epoch,
trainDataset,
valDataset,
dataObj,
cfg,
0,
True,
)
cfg.defrost() # Make cfg mutable for other operations
return best_val_acc, best_val_epoch
def main(cfg):
# Parse cmd line args
args = parse_args()
best_val_accuracies = []
test_accuracies = []
test_model_paths = [] # For verification purposes
best_val_epochs = []
temp_model_path = ""
al_model_phase = args.al_mode
print("== al_model_phase: {} ==".format(al_model_phase))
al_start = args.init_partition
sampling_fn = args.sampling_fn if al_model_phase else None
dataset_name = args.dataset
if al_model_phase:
al_step = args.step_partition
al_stop = al_start + args.al_max_iter * al_step
data_splits = [round(i, 1) for i in np.arange(al_start, al_stop, al_step)]
else:
data_splits = [args.init_partition]
al_max_iter = len(data_splits)
i_start = 1 if al_max_iter > 1 else 0
# compulsory arguments needed irrespective of active learning or not
main_args = get_main_args(args)
temp_out_dir = ""
directory_specific = "vanilla"
if args.isTransferExp:
print(
f"========= [Running Transfer Experiment; DIRECTORY SPECIFIC SET TO {args.transfer_dir_specific}] ========="
)
directory_specific = args.transfer_dir_specific
else:
if args.swa_mode and args.rand_aug:
directory_specific = "swa_rand_aug"
elif args.swa_mode:
directory_specific = "swa"
elif args.rand_aug:
directory_specific = "rand_aug"
else:
print("========= [NO ADVANCED REGULARIZATION TRICK ACTIVATED] =========")
print(f"Directory_specific: {directory_specific}")
# ONLY SWA MODE
# Construct datasets
from al_utils.data import Data as custom_Data
if args.dataset in ["CIFAR10", "CIFAR100", "SVHN", "MNIST", "STL10"]:
dataObj = custom_Data(dataset=args.dataset, israndAug=args.rand_aug, args=args)
logger.info("==== Loading trainDataset ====")
trainDataset, n_TrainDatapts = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
# To get reference to data which has no transformations applied
oldmode = dataObj.eval_mode
dataObj.eval_mode = True # To remove any transforms
logger.info("==== Loading valDataset ====")
valDataset, _ = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
logger.info("==== Loading noAugDataset ====")
noAugDataset, _ = dataObj.getDataset(
save_dir=args.train_dir, isTrain=True, isDownload=True
)
dataObj.eval_mode = oldmode
elif args.dataset == "IMAGENET":
trainDataset = None
valDataset = None
noAugDataset = None
dataObj = None
# These are constructed later because they depend on the final cfg values, which are not fully set at this point
pass
else:
logger.info(f"{args.dataset} dataset not handled yet.")
raise NotImplementedError
if args.only_swa:
# USAGE: When we only want to run SWA on some model weights
cfg.RANDAUG.ACTIVATE = args.rand_aug
cfg.MODEL.DEPTH = args.model_depth
cfg.MODEL.TYPE = args.model_type
cfg.TRAIN.DATASET = args.dataset
cfg.TRAIN.BATCH_SIZE = args.train_batch_size
cfg.TEST.BATCH_SIZE = args.test_batch_size
# To reflect our cmd arguments and config file changes in cfg
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args)
cfg.ACTIVE_LEARNING.LSET_PATH = args.lSetPath
cfg.ACTIVE_LEARNING.USET_PATH = args.uSetPath
cfg.ACTIVE_LEARNING.VALSET_PATH = args.valSetPath
temp_out_dir = (
args.out_dir
+ dataset_name
+ "/"
+ str(args.init_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
)
logger.info(f"Temp_out_dir: {temp_out_dir}")
if args.only_swa_partition == args.init_partition:
temp_l_SetPath = args.lSetPath
temp_u_SetPath = args.uSetPath
else:
temp_l_SetPath = (
args.out_dir
+ args.dataset
+ "/"
+ str(args.only_swa_partition - args.step_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/lSet.npy"
)
temp_u_SetPath = (
args.out_dir
+ args.dataset
+ "/"
+ str(args.only_swa_partition - args.step_partition)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/uSet.npy"
)
latest_model_path = get_latest_model_path(
dir_path=temp_out_dir + "checkpoints/"
)
print("temp_out_dir: {}".format(temp_out_dir))
print("lsetPath: {}".format(temp_l_SetPath))
print("uSetPath: {}".format(temp_u_SetPath))
print("valSetPath: {}".format(args.valSetPath))
print("latest_model_path: {}".format(latest_model_path))
args.device_ids = np.arange(cfg.NUM_GPUS)
argListSWA = [
args,
latest_model_path,
temp_l_SetPath,
temp_u_SetPath,
temp_out_dir + "checkpoints/",
trainDataset,
noAugDataset,
cfg,
]
SWA_subprocess_call(argListSWA, debug=True)
return
# SWA will be called here if applied
for i in range(i_start, al_max_iter):
if al_model_phase:
# Directory hierarchy -- [out_dir/dataset_name/data_split/seed_id/model_type_depth_<depth>/directory_specific/sampling_fn/]
if data_splits[i] == round(args.init_partition + args.step_partition, 1):
# First time active learning
al_args, temp_out_dir = get_al_args(
args, data_splits, i, directory_specific, alStart=True
)
else:
al_args, temp_out_dir = get_al_args(
args, data_splits, i, directory_specific, alStart=False
)
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(main_args + al_args)
assert_cfg()
# Should we do active sampling or not?
# If lSet, uSet and activeSet found in their target directories
# then we skip sampling part for that particular iteration
skip_sampling = True
check_path = (
args.out_dir
+ args.dataset
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/"
)
print("==============================")
print(f"check_path: {check_path}")
print("==============================")
req_fnames = ["lSet", "uSet", "activeSet"]
for fname in req_fnames:
if os.path.exists(check_path + fname + ".npy") and os.path.exists(
check_path + fname + ".txt"
):
continue
else:
skip_sampling = False
break
if not skip_sampling:
# do active sampling
if cfg.ACTIVE_LEARNING.SAMPLING_FN in ["vaal", "vaal_minus_disc"]:
temp_old_im_size = cfg.TRAIN.IM_SIZE
if cfg.TRAIN.DATASET == "IMAGENET":
cfg.TRAIN.IM_SIZE = args.vaal_im_size
vaal_sampling_util(cfg, dataObj, debug=True)
if cfg.TRAIN.DATASET == "IMAGENET":
cfg.TRAIN.IM_SIZE = temp_old_im_size
elif cfg.ACTIVE_LEARNING.SAMPLING_FN.startswith("ensemble"):
ensemble_sampling(
args,
cfg,
main_args,
temp_out_dir,
trainDataset,
valDataset,
noAugDataset,
dataObj,
debug=True,
)
else:
active_sampling(cfg, debug=True)
else:
print(
"Sampling Skipped as index sets exists at path: {}".format(
check_path
)
)
# update lSetPath, uSetPath
al_args = update_lset_uset_paths(
al_args,
args.out_dir
+ args.dataset
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/lSet.npy",
args.out_dir
+ args.dataset
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
+ args.sampling_fn
+ "/uSet.npy",
)
else:
# base classifier phase
temp_out_dir = (
args.out_dir
+ dataset_name
+ "/"
+ str(data_splits[i])
+ "/"
+ str(args.seed_id)
+ "/"
+ args.model_type
+ "_depth_"
+ str(args.model_depth)
+ "/"
+ directory_specific
+ "/"
)
al_args = [
"ACTIVE_LEARNING.LSET_PATH",
args.lSetPath,
"ACTIVE_LEARNING.USET_PATH",
args.uSetPath,
"OUT_DIR",
temp_out_dir,
"ACTIVE_LEARNING.VALSET_PATH",
args.valSetPath,
"ACTIVE_LEARNING.ACTIVATE",
args.al_mode,
"ACTIVE_LEARNING.DATA_SPLIT",
args.init_partition,
"DIR_SPECIFIC",
directory_specific,
]
# Make out_directory for saving results later
os.makedirs(temp_out_dir, exist_ok=True)
temp_al_args = al_args
al_args = main_args + al_args
print("========[CMD ARGUMNETS]=======")
print("al_args: {}".format(al_args))
print("Using data_splits: {}".format(data_splits))
print("==============================")
print("============================")
print("Running AL iteration #{}".format(i))
print("~~~~temp_out_dir: {}".format(temp_out_dir))
if cfg.ACTIVE_LEARNING.SAMPLING_FN.startswith("ensemble"):
cfg.OUT_DIR = temp_out_dir
# This runs after the active-sampling step for this iteration
if cfg.ACTIVE_LEARNING.ACTIVATE and cfg.ACTIVE_LEARNING.NOISY_ORACLE > 0.0:
if cfg.TRAIN.DATASET == "IMAGENET":
raise NotImplementedError
print("============= ADDING NOISE =============")
noise_percent = cfg.ACTIVE_LEARNING.NOISY_ORACLE
# temp_data_split = cfg.ACTIVE_LEARNING.DATA_SPLIT
activeSet = np.load(
os.path.join(cfg.OUT_DIR, "activeSet.npy"), allow_pickle=True
)
noise_idx = np.arange(start=0, stop=len(activeSet))
np.random.shuffle(noise_idx)
noise_idx = noise_idx[0 : int(noise_percent * len(activeSet))]
print("len(noise_idx): ", len(noise_idx))
active_noise_idx = activeSet[noise_idx]
for idx in active_noise_idx:
trainDataset.targets[idx] = np.random.randint(
0, cfg.MODEL.NUM_CLASSES, 1
)[0]
print("=============== DONE ================")
if not cfg.TRAIN.TRANSFER_EXP and os.path.exists(
temp_out_dir + "checkpoints/"
):
print(
f"=== Skipped Learning as path: [{temp_out_dir}checkpoints/] exists...==="
)
best_val_acc = 0
best_val_epoch = 0
print("temp_out_dir: ", temp_out_dir)
cfg.merge_from_file(os.path.join(temp_out_dir, "config.yaml"))
cfg.PORT = args.port
else:
best_val_acc, best_val_epoch = al_main(
cfg, args, trainDataset, valDataset, dataObj, al_args
)
if cfg.TRAIN.TRANSFER_EXP:
temp_out_dir = cu.get_checkpoint_dir_wo_checkpoint(cfg) + "/"
print("temp_out_dir: {}".format(temp_out_dir))
latest_model_path = get_latest_model_path(dir_path=temp_out_dir)
else:
latest_model_path = get_latest_model_path(
dir_path=temp_out_dir + "checkpoints/"
)
print("temp_out_dir: {}".format(temp_out_dir))
if best_val_epoch == 0 and best_val_acc == 0:
model_info = os.path.split(latest_model_path)[1]
best_val_epoch = int(model_info.split("_")[-1].split(".")[0])
best_val_acc = float(model_info.split("_")[2])
print("latest_model_path: {}".format(latest_model_path))
## RUN SWA
temp_l_SetPath = temp_al_args[1]
temp_u_SetPath = temp_al_args[3]
print(
"Best Val Acc: {}, Best Val Epoch: {}".format(
best_val_acc, best_val_epoch + 1
)
)
print("============================")
best_val_accuracies.append(best_val_acc)
best_val_epochs.append(best_val_epoch)
if args.swa_mode and args.swa_freq > 0:
# This means we want to run SWA else we won't
args.device_ids = np.arange(cfg.NUM_GPUS)
swa_temp_out_dir = temp_out_dir # + "checkpoints/"
if swa_temp_out_dir.find("checkpoints") != -1:
# remove checkpoints directory
swa_temp_out_dir = swa_temp_out_dir[
: swa_temp_out_dir.index("checkpoints")
]
swa_temp_out_dir = os.path.join(swa_temp_out_dir, "checkpoints/")
argListSWA = [
args,
latest_model_path,
temp_l_SetPath,
temp_u_SetPath,
swa_temp_out_dir,
trainDataset,
noAugDataset,
cfg,
]
print("RUNNING SWA FROM HERE ..........")
print("Check data paths")
print("LsetPath: ", cfg.ACTIVE_LEARNING.LSET_PATH)
print("UsetPath: ", cfg.ACTIVE_LEARNING.USET_PATH)
SWA_subprocess_call(argListSWA, debug=True)
print("=====BEST MODEL=====")
print("temp_out_dir: {}".format(temp_out_dir))
temp_al_start = (i == 0)
if i == 0:
best_model_path = get_best_model_path(
args, data_splits, i, directory_specific, temp_al_start
)
else:
best_model_path = get_best_model_path(
args,
data_splits,
i,
directory_specific,
temp_al_start,
directPath=temp_out_dir,
)
print("best_model_path: {}".format(best_model_path))
if cfg.TRAIN.TRANSFER_EXP:
import copy
temp_cfg = copy.deepcopy(cfg)
temp_cfg.OUT_DIR = cu.get_checkpoint_dir_wo_checkpoint(cfg)
print("cfg.OUT_DIR : {}".format(cfg.OUT_DIR))
print("temp_cfg.OUT_DIR: {}".format(temp_cfg.OUT_DIR))
temp_cfg.freeze()
custom_dump_cfg(temp_cfg=temp_cfg)
temp_cfg.defrost()
##test model via subprocess
temp_test_acc = test_net_subprocess_call(
temp_out_dir, best_model_path, debug=True
)
test_accuracies.append(temp_test_acc)
test_model_paths.append(best_model_path)
if al_max_iter > 1:
for i in range(len(data_splits) - 1):
# print("For {}% split, best val accuracy: {} achieved at epoch: {}"\
# .format(data_splits[i+1], best_val_accuracies[i], best_val_epochs[i]))
print(
"For {}% split, test accuracy: {:.3f} where model was loaded from path: {}".format(
data_splits[i + 1], test_accuracies[i], test_model_paths[i]
)
)
if __name__ == "__main__":
from pycls.core.config import cfg
main(cfg)
| en | 0.77079 | #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Modified by <NAME> from official pycls codebase inorder to add the AL functionality # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Train a classification model. Basic utility to plot X vs Y line graphs. Args: cfg: Reference to the config yaml x_vals: values on x-axis y_vals: values on y-axis x_name: Label on x-axis y_name: Label on y-axis dataset_name: Dataset name. isDebug (bool, optional): Switch for debug mode. Defaults to False. Saves arrays provided in the list in npy format # return if not master process Determines if the model should be evaluated at the current epoch. Logs model info Performs one epoch of training. # Update the learning rate # Enable training mode # This basically notes the start time in timer class defined in utils/timer.py # ensuring that inputs are floatTensor as model weights are # Perform the forward pass # Compute the loss # Perform the backward pass # Update the parametersSWA # Compute the errors # Combine the stats across the GPUs # Average error and losses across GPUs # Also this this calls wait method on reductions so we are ensured # to obtain synchronized results # Copy the stats from GPU to CPU (sync point) # #ONLY MASTER PROCESS SHOULD WRITE TO TENSORBOARD # because cur_epoch starts with 0 # Compute the difference in time now from start time initialized just before this for loop. # Log epoch stats Evaluates the model on the test set. # Enable eval mode # Transfer the data to the current GPU device # Compute the predictions # Compute the errors # Combine the errors across the GPUs # as above returns a list # Copy the errors from GPU to CPU (sync point) # Multiply by Number of GPU's as top1_err is scaled by 1/Num_GPUs # Update and log stats # Log epoch stats Trains the model. # Build the model (before the loaders to speed up debugging) # Define the loss function # print(f"temp_weights: {temp_weights}") # Construct the optimizer # Load initial weights if there are any # If active learning mode then there has to be some starting point model # check if randAug activated # Compute precise time # Create data loaders # handles when we pass cifar/svhn datasets # get partitions # Loading test partition # Create meters # Perform the training loop ##best checkpoint states # # Train for one epoch # Compute precise BN stats # # Evaluate the model # Original code passes on testLoader but we want to compute on val Set # Save best model and optimizer state for checkpointing # log if master process # as we start from 0 epoch ####################### # Save a checkpoint ###################### # named_save_checkpoint saves model with cur_epoch+1 in name # ##Tensorboard for loss vs epoch ##PLOT arrays # update shared variable -- iff process is master process # if distributed training SAVES the best model checkpoint Performs single process training. 
# Setup logging # Fix the RNG seeds (see RNG comment in core/config.py for discussion) # Configure the CUDNN backend # Train the model # train num_ensemble models # to make cfg mutable Switch off any regularization if there is any # Get original CFG back # this calls distributed training Main function running AL cycles # Load config options # Ensure that the output dir exists # Save the config # Perform training # val_acc, val_epoch, trainDataset, valDataset, dataObj, cfg # Make cfg mutable for other operations # Parse cmd line args # For verification purposes # compulsory arguments needed irrespective of active learning or not # ONLY SWA MODE # Construct datasets # To get reference to data which has no transformations applied # To remove any transforms # All these are defined later as they need final values of cfg and yet cfg is not properly set # USAGE: When we only want to run SWA on some model weights # To reflect our cmd arguments and config file changes in cfg # SWA will be called here if applied # Hierarchy followed -- [al_results/partition_size/dataset_name/model_type/directory_specific/sampling_fn/data_splits] # First time active learning # Should we do active sampling or not? # If lSet, uSet and activeSet found in their target directories # then we skip sampling part for that particular iteration # do active sampling # update lSetPath, uSetPath # base classifier phase # Make out_directory for saving results later #{}".format(i)) # Because this happens after active learning process then # temp_data_split = cfg.ACTIVE_LEARNING.DATA_SPLIT ## RUN SWA # This means we want to run SWA else we won't # + "checkpoints/" # remove checkpoints directory ##test model via subprocess # print("For {}% split, best val accuracy: {} achieved at epoch: {}"\ # .format(data_splits[i+1], best_val_accuracies[i], best_val_epochs[i])) | 1.901579 | 2 |
homematicip_tracking/__init__.py | Emrys-Merlin/homematicip_tracking | 1 | 6632135 | <filename>homematicip_tracking/__init__.py
"""Top-level package for HomematicIP Tracking."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| <filename>homematicip_tracking/__init__.py
"""Top-level package for HomematicIP Tracking."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| en | 0.763209 | Top-level package for HomematicIP Tracking. <NAME> | 1.081332 | 1 |
digits/task.py | Benedict93/DIGITS | 4,552 | 6632136 | # Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import logging
import os.path
import platform
import re
import signal
import subprocess
import time
import flask
import gevent.event
from . import utils
from .config import config_value
from .status import Status, StatusCls
import digits.log
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 1
class Task(StatusCls):
"""
Base class for Tasks
A Task is a compute-heavy operation that runs in a separate executable
Communication is done by processing the stdout of the executable
"""
def __init__(self, job_dir, parents=None):
super(Task, self).__init__()
self.pickver_task = PICKLE_VERSION
self.job_dir = job_dir
self.job_id = os.path.basename(job_dir)
if parents is None:
self.parents = None
elif isinstance(parents, (list, tuple)):
self.parents = parents
elif isinstance(parents, Task):
self.parents = [parents]
else:
raise TypeError('parents is %s' % type(parents))
self.exception = None
self.traceback = None
self.aborted = gevent.event.Event()
self.set_logger()
self.p = None # Subprocess object for training
def __getstate__(self):
d = self.__dict__.copy()
if 'aborted' in d:
del d['aborted']
if 'logger' in d:
del d['logger']
if 'p' in d:
# Subprocess object for training is not pickleable
del d['p']
return d
def __setstate__(self, state):
self.__dict__ = state
self.aborted = gevent.event.Event()
self.set_logger()
def set_logger(self):
self.logger = digits.log.JobIdLoggerAdapter(
logging.getLogger('digits.webapp'),
{'job_id': self.job_id},
)
def name(self):
"""
Returns a string
"""
raise NotImplementedError
def html_id(self):
"""
Returns a string
"""
return 'task-%s' % id(self)
def on_status_update(self):
"""
Called when StatusCls.status.setter is used
"""
from digits.webapp import app, socketio
# Send socketio updates
message = {
'task': self.html_id(),
'update': 'status',
'status': self.status.name,
'css': self.status.css,
'show': (self.status in [Status.RUN, Status.ERROR]),
'running': self.status.is_running(),
}
with app.app_context():
message['html'] = flask.render_template('status_updates.html',
updates=self.status_history,
exception=self.exception,
traceback=self.traceback,
)
socketio.emit('task update',
message,
namespace='/jobs',
room=self.job_id,
)
from digits.webapp import scheduler
job = scheduler.get_job(self.job_id)
if job:
job.on_status_update()
def path(self, filename, relative=False):
"""
Returns a path to the given file
Arguments:
filename -- the requested file
Keyword arguments:
relative -- If False, return an absolute path to the file
If True, return a path relative to the jobs directory
"""
if not filename:
return None
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self.job_dir, filename)
if relative:
path = os.path.relpath(path, config_value('jobs_dir'))
return str(path).replace("\\", "/")
def ready_to_queue(self):
"""
Returns True if all parents are done
"""
if not self.parents:
return True
for parent in self.parents:
if parent.status != Status.DONE:
return False
return True
def offer_resources(self, resources):
"""
Check the available resources and return a set of requested resources
Arguments:
resources -- a copy of scheduler.resources
"""
raise NotImplementedError
def task_arguments(self, resources, env):
"""
Returns args used by subprocess.Popen to execute the task
Returns False if the args cannot be set properly
Arguments:
resources -- the resources assigned by the scheduler for this task
environ -- os.environ instance to run process in
"""
raise NotImplementedError
def before_run(self):
"""
Called before run() executes
Raises exceptions
"""
pass
def run(self, resources):
"""
Execute the task
Arguments:
resources -- the resources assigned by the scheduler for this task
"""
self.before_run()
env = os.environ.copy()
args = self.task_arguments(resources, env)
if not args:
self.logger.error('Could not create the arguments for Popen')
self.status = Status.ERROR
return False
# Convert them all to strings
args = [str(x) for x in args]
self.logger.info('%s task started.' % self.name())
self.status = Status.RUN
unrecognized_output = []
import sys
env['PYTHONPATH'] = os.pathsep.join(['.', self.job_dir, env.get('PYTHONPATH', '')] + sys.path)
# https://docs.python.org/2/library/subprocess.html#converting-argument-sequence
if platform.system() == 'Windows':
args = ' '.join(args)
self.logger.info('Task subprocess args: "{}"'.format(args))
else:
self.logger.info('Task subprocess args: "%s"' % ' '.join(args))
self.p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=False if platform.system() == 'Windows' else True,
env=env,
)
try:
sigterm_time = None # When was the SIGTERM signal sent
sigterm_timeout = 2 # When should the SIGKILL signal be sent
while self.p.poll() is None:
for line in utils.nonblocking_readlines(self.p.stdout):
if self.aborted.is_set():
if sigterm_time is None:
# Attempt graceful shutdown
self.p.send_signal(signal.SIGTERM)
sigterm_time = time.time()
self.status = Status.ABORT
break
if line is not None:
# Remove whitespace
line = line.strip()
if line:
if not self.process_output(line):
self.logger.warning('%s unrecognized output: %s' % (self.name(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
if sigterm_time is not None and (time.time() - sigterm_time > sigterm_timeout):
self.p.send_signal(signal.SIGKILL)
self.logger.warning('Sent SIGKILL to task "%s"' % self.name())
time.sleep(0.1)
time.sleep(0.01)
except:
self.p.terminate()
self.after_run()
raise
self.after_run()
if self.status != Status.RUN:
return False
elif self.p.returncode != 0:
self.logger.error('%s task failed with error code %d' % (self.name(), self.p.returncode))
if self.exception is None:
self.exception = 'error code %d' % self.p.returncode
if unrecognized_output:
if self.traceback is None:
self.traceback = '\n'.join(unrecognized_output)
else:
self.traceback = self.traceback + ('\n'.join(unrecognized_output))
self.after_runtime_error()
self.status = Status.ERROR
return False
else:
self.logger.info('%s task completed.' % self.name())
self.status = Status.DONE
return True
def abort(self):
"""
Abort the Task
"""
if self.status.is_running():
self.aborted.set()
def preprocess_output_digits(self, line):
"""
Takes line of output and parses it according to DIGITS's log format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL] message
match = re.match(r'(\S{10} \S{8}) \[(\w+)\s*\] (.*)$', line)
if match:
timestr = match.group(1)
timestamp = time.mktime(time.strptime(timestr, digits.log.DATE_FORMAT))
level = match.group(2)
message = match.group(3)
if level.startswith('DEB'):
level = 'debug'
elif level.startswith('INF'):
level = 'info'
elif level.startswith('WAR'):
level = 'warning'
elif level.startswith('ERR'):
level = 'error'
elif level.startswith('CRI'):
level = 'critical'
return (timestamp, level, message)
else:
return (None, None, None)
def process_output(self, line):
"""
Process a line of output from the task
Returns True if the output was able to be processed
Arguments:
line -- a line of output
"""
raise NotImplementedError
def est_done(self):
"""
Returns the estimated time in seconds until the task is done
"""
if self.status != Status.RUN or self.progress == 0:
return None
elapsed = time.time() - self.status_history[-1][1]
return (1 - self.progress) * elapsed // self.progress
def after_run(self):
"""
Called after run() executes
"""
pass
def after_runtime_error(self):
"""
Called after a runtime error during run()
"""
pass
def emit_progress_update(self):
"""
Call socketio.emit for task progress update, and trigger job progress update.
"""
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'progress',
'percentage': int(round(100 * self.progress)),
'eta': utils.time_filters.print_time_diff(self.est_done()),
},
namespace='/jobs',
room=self.job_id,
)
from digits.webapp import scheduler
job = scheduler.get_job(self.job_id)
if job:
job.emit_progress_update()
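# --- Illustrative sketch (not part of the original DIGITS task.py): a minimal
# Task subclass showing how the abstract hooks above are typically filled in.
# The command and resource choices are hypothetical; only the method names and
# signatures come from the Task base class defined in this module.
class EchoTask(Task):

    def name(self):
        return 'Echo'

    def offer_resources(self, resources):
        # Claim nothing; a real task would reserve GPU/CPU slots from `resources`.
        return {}

    def task_arguments(self, resources, env):
        # Argument list handed to subprocess.Popen by Task.run().
        return ['echo', 'hello from EchoTask']

    def process_output(self, line):
        # Treat every line of subprocess output as recognized.
        self.logger.debug('EchoTask output: %s', line)
        return True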
tests/test_memoization.py | toheedN/MemoizationTest | 0 | 6632137 | <gh_stars>0
import os
import sys
import time
from time import sleep
import pytest
# Setup project paths
sys.path.append(os.getcwd())
sys.path.append("../")
# local imports
from memoization import memoizer
from memoization.memoizer import get_cache_stats
@pytest.fixture(autouse=True)
def reset_stats():
"""
reset cache access stats for each case
:return:
"""
print("################# Pre-Test Conditions #####################")
print("################### Resetting Stats #######################")
memoizer.hits = 0
memoizer.misses = 0
memoizer.cache_stats = {}
def test_basic_memoization_and_cache_timeout():
"""
    Basic test for memoization with a timeout; we check the cache key both before and after it expires
"""
def sum_up_all_values(*args):
"""
:param args: User input arguments for sum function
:return: sum of input arguments
"""
summed_value = 0
for arg_ in list(args):
summed_value += arg_
return summed_value
return_value = 10
cache_hit_count = 2
# List of numbers to add
test_arg = [1, 2, 3, 4]
memoized = memoizer.memoize(sum_up_all_values, timeout=2000)
assert memoized(*test_arg) == return_value
print(get_cache_stats())
    # access the value twice more and check cache stats: hits should be 2 and misses 0 for the key
assert memoized(*test_arg) == return_value
assert memoized(*test_arg) == return_value
localKey = str(test_arg[0]) + "_" + sum_up_all_values.__name__
assert get_cache_stats()[localKey]["hits"] == cache_hit_count and get_cache_stats()[localKey]["misses"] == 0
sleep(2)
assert memoized(*test_arg) == return_value
assert get_cache_stats()[localKey]["hits"] == cache_hit_count and get_cache_stats()[localKey]["misses"] == 1
    # Cache stats should show 2 hits and one miss
print(f"Final Cache Stats : {get_cache_stats()}")
def test_memoization_first_func_param_as_key():
"""
    Test memoization to ensure that the first parameter is used as the key.
"""
return_value = 5
def test_function(*args):
return return_value
memoized = memoizer.memoize(test_function, timeout=10000)
test_arg = ["firstArgument", "secondArgument"]
# perform the expensive operation
assert memoized(*test_arg) == return_value
    # Ensure the cache_stats key is built from the first argument of the function we called
assert test_arg[0] + "_" + test_function.__name__ in get_cache_stats()
print(get_cache_stats())
def test_memoization_with_resolver():
"""
    Test memoization with a resolver; the resolver should provide the cache key
"""
def resolver_cb(*func_args, **kwargs):
"""
        resolver function to resolve the key for memoization
        :param func_args: parameters provided to the function to memoize
:return: returns the resolved key
"""
resolved_key = "Sum = {}".format('_'.join(str(x) for x in list(func_args)))
return resolved_key
def test_function(*args, **kwargs):
return "Some costly calculation to save with resolved key"
memoized = memoizer.memoize(test_function, resolver_cb, 1000)
test_arg = [1, 2, 3, 4, 6]
assert memoized(*test_arg, userinput="test_kargs") == "Some costly calculation to save with resolved key"
    # Ensure the key is the same as the key generated by the resolver
print(get_cache_stats())
resolved_key_Local = resolver_cb(*test_arg)+"_userinput_test_kargs"
assert resolved_key_Local in get_cache_stats()
def test_memoize_Different_data_types():
"""
    Test memoization with different data types, e.g. string, tuple, dictionary.
    Note: all data types are converted to strings to be stored as a key-value pair
"""
def test_function(*args):
return "Some costly calculation to save with resolved key"
memoized = memoizer.memoize(test_function, resolver=None, timeout=10000)
# memoize list as a key
test_arg = "stringAsAKey"
assert memoized(test_arg) == "Some costly calculation to save with resolved key"
# memoize list as a key
test_arg = [1, 2, 3, 4, 6]
assert memoized(test_arg) == "Some costly calculation to save with resolved key"
# memoize list as a key
test_arg = {"key": "testKey"}
assert memoized(test_arg) == "Some costly calculation to save with resolved key"
# Memoize tuple
test_arg = ("key", "value")
assert memoized(test_arg) == "Some costly calculation to save with resolved key"
print(get_cache_stats())
    # ensure there are 4 entries in the cache with different data types
assert len(get_cache_stats()) == 4
def test_memoization_with_non_callable_function():
"""
    This test should raise an exception stating that the function (func) "user_func cannot be a non-callable object"
:return:
"""
def key_resolver(*args):
"""
dummy resolver
:return:
"""
agrs_flat = ""
for argskey in list(args):
agrs_flat += str(argskey)
test_function = "test string"
    # test_function is non-callable and there is no resolver; memoizer should raise
with pytest.raises(TypeError, match="user_func cannot be a non-callable object"):
memoizer.memoize(test_function, resolver=None, timeout=10000)
    # test_function is non-callable and the resolver is valid; memoizer should still raise a type error
with pytest.raises(TypeError, match="user_func cannot be a non-callable object"):
memoizer.memoize(test_function, resolver=key_resolver, timeout=10000)
def test_memoization_with_non_callable_resolver():
"""
    This test should raise an exception from the key resolver check: "unable to derive key with non-callable object"
:return:
"""
test_result = 5
key_resolver = "InvlidResolver"
test_function = lambda key: test_result
    # resolver is non-callable, memoizer should raise
with pytest.raises(TypeError, match=f'unable to derive key with non-callable object {key_resolver}'):
memoizer.memoize(test_function, resolver=key_resolver, timeout=10000)
def test_memoization_with_invalid_timeout_value():
"""
    This test should raise an exception for a timeout of None or any value other than a positive int or float
:return:
"""
test_result = 5
test_function = lambda key: test_result
# timeout = 0 should raise exception
with pytest.raises(TypeError, match='timeout must be a positive integer or float grater than 0'):
memoizer.memoize(test_function, resolver=None, timeout=0)
# timeout = None should raise exception
with pytest.raises(TypeError, match='timeout must be a positive integer or float grater than 0'):
        memoizer.memoize(test_function, resolver=None, timeout=None)
# timeout = some string value should raise exception
with pytest.raises(TypeError, match='timeout must be a positive integer or float grater than 0'):
memoizer.memoize(test_function, resolver=None, timeout="somestring")
def test_full_example_memoization_fibonacci_sequence():
"""
    Performing end-to-end testing using the famous example of the Fibonacci sequence;
    we measure the benefit in execution time by timing both versions with time.time()
"""
func_results = 3524578
feb_value = None
feb_value_no_memo = None
def fibonacci_without_memoizer(feb_n):
"""
        Fibonacci series function computed recursively without memoization
:param feb_n:
:return:
"""
# check that the input is a positive integer
if type(feb_n) != int:
raise TypeError("n must be a positive integer")
if feb_n < 1:
raise ValueError("n must be a positive integer")
if feb_n == 1:
return 1
elif feb_n == 2:
return 1
return fibonacci_without_memoizer(feb_n - 1) + fibonacci_without_memoizer(feb_n - 2)
def fibonacci(feb_n):
"""
        Fibonacci series function whose recursive calls go through the memoizer
:param feb_n:
"""
# check that the input is a positive integer
if type(feb_n) != int:
raise TypeError("n must be a positive integer")
if feb_n < 1:
raise ValueError("n must be a positive integer")
if feb_n == 1:
return 1
elif feb_n == 2:
return 1
return memoized(feb_n - 1) + memoized(feb_n - 2)
memoized = memoizer.memoize(fibonacci, None, timeout=10000)
    # Calculate Fibonacci up to n = 33 with memoization
start_memo_feb = time.time()
for n in range(1, 34):
feb_value = memoized(n)
assert feb_value == func_results
end_memo_feb = time.time()
total_time_consumed_with_memo = end_memo_feb - start_memo_feb
print(f"Time Lapsed during the function: {round(total_time_consumed_with_memo, 2)} seconds")
    # Calculate Fibonacci up to n = 33 without memoization
start = time.time()
for n in range(1, 34):
feb_value_no_memo = fibonacci_without_memoizer(n)
assert feb_value_no_memo == func_results
end = time.time()
print(f"Time Lapsed during the function: {round((end - start), 2)} seconds")
print(
f"Without memoization fibonacci sequence function Took: {round((end - start), 2) - round(total_time_consumed_with_memo, 2)} more seconds")
if __name__ == '__main__':
pytest.main(args=['-sv', os.path.abspath(__file__)])
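# --- Illustrative sketch (not part of the original test file): a simplified
# memoize-with-timeout helper showing the general pattern these tests exercise.
# This is NOT the actual memoization.memoizer module; the real module's key
# derivation, statistics bookkeeping and validation messages may differ.
from time import time as _now


def simple_memoize(user_func, resolver=None, timeout=1000):
    """Return a wrapper caching user_func results for `timeout` milliseconds."""
    if not callable(user_func):
        raise TypeError('user_func cannot be a non-callable object')
    if resolver is not None and not callable(resolver):
        raise TypeError('unable to derive key with non-callable object {}'.format(resolver))
    if isinstance(timeout, bool) or not isinstance(timeout, (int, float)) or timeout <= 0:
        raise TypeError('timeout must be a positive number of milliseconds')

    cache = {}  # key -> (expiry timestamp in seconds, cached value)

    def wrapper(*args, **kwargs):
        if resolver is not None:
            key = resolver(*args, **kwargs)
        elif args:
            key = str(args[0]) + '_' + user_func.__name__
        else:
            key = user_func.__name__
        entry = cache.get(key)
        if entry is not None and entry[0] > _now():
            return entry[1]  # fresh cache hit
        value = user_func(*args, **kwargs)  # miss or expired entry: recompute
        cache[key] = (_now() + timeout / 1000.0, value)
        return value

    return wrapper


# Hypothetical usage mirroring the tests above:
#     memoized_sum = simple_memoize(sum_up_all_values, timeout=2000)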
predict_churn.py | kodama3d/msds_600_week5 | 0 | 6632138
import pandas as pd
import numpy as np
from pycaret.classification import predict_model, load_model
def load_data(filepath):
"""
Loads churn data into a DataFrame from a string filepath.
"""
df = pd.read_csv(filepath, index_col='customerID')
return df
def make_predictions(df):
"""
Uses the pycaret best model to make predictions on data in the df dataframe.
"""
    model = load_model('BEST')  # load the saved best model, whose contents change between modeling runs
predictions = predict_model(model, data=df)
predictions.rename({'Label': 'Churn_prediction'}, axis=1, inplace=True)
predictions['Churn_prediction'].replace({1: 'Churn', 0: 'No Churn'},
inplace=True)
predictions.rename({'Score': 'Percentage'}, axis=1, inplace=True)
return predictions[predictions.columns[6:8]]
if __name__ == "__main__":
"""
    Runs the full script when executed as the main module.
    Transforms the new data to create features that match the model's expected inputs
"""
df = load_data('data/new_churn_data_unmodified.csv')
df.fillna(df['TotalCharges'].median(), inplace=True)
df.at[df['tenure'] == 0, 'tenure'] = np.nan
df['tenure'].fillna(df['tenure'].median(), inplace=True)
df['PhoneService'] = df['PhoneService'].replace({'No': 0, 'Yes': 1})
df['Contract'] = df['Contract'].replace({'Month-to-month': 0, 'One year': 1, 'Two year': 2})
df['PaymentMethod'] = df['PaymentMethod'].replace({'Electronic check': 0, 'Mailed check': 1,
'Bank transfer (automatic)': 2, 'Credit card (automatic)': 3})
predictions = make_predictions(df)
print('predictions:')
print(predictions)
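# --- Illustrative sketch (not part of the original script): calling the helpers
# above on an in-memory DataFrame instead of a CSV file. The feature columns and
# values below are hypothetical placeholders, the encodings mirror the replace()
# calls in the __main__ block, and the saved 'BEST' pycaret model must exist for
# predict_model to succeed.
def predict_single_customer():
    sample = pd.DataFrame(
        [{
            'tenure': 12.0,
            'PhoneService': 1,      # 'Yes' -> 1
            'Contract': 0,          # 'Month-to-month' -> 0
            'PaymentMethod': 0,     # 'Electronic check' -> 0
            'MonthlyCharges': 70.35,
            'TotalCharges': 844.20,
        }],
        index=pd.Index(['0000-SAMPLE'], name='customerID'),
    )
    return make_predictions(sample)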
specs/PointOfInterest/WeatherStation/harvesters/portugal/portugal_weather_stations.py | fsismondi/data-models | 39 | 6632139 | <reponame>fsismondi/data-models<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This program collects information about Portugal weather stations from IPMA and prepares config that can be used by
harvester itself to upload the list of Portugal weather stations to Orion Context Broker or export data required by
other weather harvesters:
- https://github.com/FIWARE/dataModels/tree/master/specs/Weather/WeatherObserved/harvesters/portugal
It also exports data to the CSV file (./stations.csv), that can be used to upload the list of weather stations
to Google Maps:
- https://www.google.com/maps/d/viewer?mid=1Sd5uNFd2um0GPog2EGkyrlzmBnEKzPQw .
Legal notes:
- http://www.ipma.pt/en/siteinfo/index.html?page=index.xml
Examples:
- get the list of stations from IPMA:
curl -X GET --header 'Accept: application/json' \
'http://api.ipma.pt/open-data/observation/meteorology/stations/obs-surface.geojson'
AsyncIO name convention:
async def name - entry point for asynchronous data processing/http requests and post processing
async def name_bounded - intermediate step to limit amount of parallel workers
async def name_one - worker process
"""
from aiohttp import ClientSession, ClientConnectorError
from argparse import ArgumentTypeError, ArgumentParser
from asyncio import Semaphore, ensure_future, gather, run, TimeoutError as ToE, set_event_loop_policy
from copy import deepcopy
from csv import DictWriter
from re import sub
from sys import stdout
from uvloop import EventLoopPolicy
from yajl import dumps, loads
from yaml import safe_load as load, dump
from requests import get, exceptions
import logging
default_limit_entities = 50 # amount of entities per 1 request to Orion
default_limit_targets = 50 # amount of parallel request to Orion
default_log_level = 'INFO'
default_orion = 'http://orion:1026'  # Orion Context Broker endpoint
http_ok = [200, 201, 204]
log_levels = ['ERROR', 'INFO', 'DEBUG']
logger = None
logger_req = None
stations_file_yml = 'stations.yml' # destination file for yml format
stations_file_csv = 'stations.csv' # destination file for csv format
tz_wet = 'Europe/Lisbon'
tz_azot = 'Atlantic/Azores'
tz_azot_codes = ['932', '501', '502', '504', '506', '507', '510', '511', '512', '513', '515']
url_stations = 'http://api.ipma.pt/open-data/observation/meteorology/stations/obs-surface.geojson'
template = {
'id': 'urn:ngsi-ld:PointOfInterest:WeatherStation-PT-',
'type': 'PointOfInterest',
'category': {
'type': 'array',
'value': [
'WeatherStation'
]
},
'address': {
'type': 'PostalAddress',
'value': {
'addressCountry': 'PT',
'addressLocality': None
}
},
'location': {
'type': 'geo:json',
'value': {
'type': 'Point',
'coordinates': None
}
},
'source': {
'type': 'URL',
'value': 'https://www.ipma.pt'
}
}
def collect_stations():
result = dict()
result['stations'] = dict()
content = None
resp = None
try:
resp = get(url_stations)
except exceptions.ConnectionError:
logger.error('Collecting the list of stations from IPMA failed due to connection problem')
exit(1)
if resp.status_code in http_ok:
content = loads(resp.text)['features']
else:
logger.error('Collecting the list of stations from IPMA failed due to the return code %s', resp.status_code)
exit(1)
for station in content:
station_code = str(station['properties']['idEstacao'])
result['stations'][station_code] = dict()
result['stations'][station_code]['locality'] = sanitize(station['properties']['localEstacao'])
result['stations'][station_code]['longitude'] = station['geometry']['coordinates'][0]
result['stations'][station_code]['latitude'] = station['geometry']['coordinates'][1]
if station_code in tz_azot_codes:
result['stations'][station_code]['timezone'] = tz_azot
else:
result['stations'][station_code]['timezone'] = tz_wet
return result
def log_level_to_int(log_level_string):
if log_level_string not in log_levels:
message = 'invalid choice: {0} (choose from {1})'.format(log_level_string, log_levels)
raise ArgumentTypeError(message)
return getattr(logging, log_level_string, logging.ERROR)
async def post(body):
logger.debug('Posting data to Orion started')
tasks = list()
headers = {
'Content-Type': 'application/json'
}
if service:
headers['FIWARE-SERVICE'] = service
if path:
headers['FIWARE-SERVICEPATH'] = path
sem = Semaphore(limit_targets)
# splitting list to list of lists to fit into limits
block = 0
items = 0
body_divided = dict()
body_divided[0] = list()
while True:
if len(body) > 0:
if items < limit_entities:
body_divided[block].append(body.pop())
items += 1
else:
items = 0
block += 1
body_divided[block] = list()
else:
break
async with ClientSession() as session:
for item in body_divided:
task = ensure_future(post_bounded(body_divided[item], headers, sem, session))
tasks.append(task)
response = await gather(*tasks)
response = list(set(response))
if True in response:
response.remove(True)
for item in response:
logger.error('Posting data to Orion failed due to the %s', item)
logger.debug('Posting data to Orion ended')
async def post_bounded(item, headers, sem, session):
async with sem:
return await post_one(item, headers, session)
async def post_one(item, headers, session):
payload = {
'actionType': 'APPEND',
'entities': item
}
payload = dumps(payload)
url = orion + '/v2/op/update'
try:
async with session.post(url, headers=headers, data=payload) as response:
status = response.status
except ClientConnectorError:
return 'connection problem'
except ToE:
return 'timeout problem'
if status not in http_ok:
return 'response code ' + str(status)
return True
async def prepare_schema(src_file, csv_flag=False):
logger.debug('Schema preparation started')
tasks = list()
for item in src_file['stations']:
task = ensure_future(prepare_schema_one(item, src_file['stations'][item], csv_flag))
tasks.append(task)
result = await gather(*tasks)
logger.debug('Schema preparation ended')
return result
async def prepare_schema_one(local_id, station, csv_flag):
if not csv_flag:
item = deepcopy(template)
item['location']['value']['coordinates'] = [station['longitude'], station['latitude']]
item['address']['value']['addressLocality'] = station['locality']
item['id'] = item['id'] + local_id
else:
item = deepcopy(station)
item['id'] = local_id
item['country'] = 'PT'
return item
def reply_status(stations):
logger.info('Orion: %s', orion)
logger.info('FIWARE Service: %s', service)
logger.info('FIWARE Service-Path: %s', path)
logger.info('Stations: %s', str(len(stations['stations'])))
logger.info('limit_entities: %s', str(limit_entities))
logger.info('limit_targets: %s', str(limit_targets))
logger.info('Log level: %s', args.log_level)
def sanitize(str_in):
return sub(r"[<(>)\"\'=;-]", "", str_in)
def setup_logger():
local_logger = logging.getLogger('root')
local_logger.setLevel(log_level_to_int(args.log_level))
handler = logging.StreamHandler(stdout)
handler.setLevel(log_level_to_int(args.log_level))
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%dT%H:%M:%SZ')
handler.setFormatter(formatter)
local_logger.addHandler(handler)
local_logger_req = logging.getLogger('requests')
local_logger_req.setLevel(logging.WARNING)
return local_logger, local_logger_req
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--export_csv',
action='store_true',
dest='csv',
help='Export the list of stations to CSV file (./stations.csv) and exit')
parser.add_argument('--export_yml',
action='store_true',
dest='yml',
help='Export the list of stations to YML file (./stations.yml) and exit')
parser.add_argument('--import',
action='store_true',
dest='import_yml',
help='Import the list of stations from file (./stations.yml)')
parser.add_argument('--limit-entities',
default=default_limit_entities,
dest='limit_entities',
help='Limit amount of entities per 1 post request to Orion')
parser.add_argument('--limit-targets',
default=default_limit_targets,
dest='limit_targets',
help='Limit amount of parallel requests to Orion')
parser.add_argument('--log-level',
default=default_log_level,
dest='log_level',
help='Set the logging output level. {0}'.format(log_levels),
nargs='?')
parser.add_argument('--orion',
action='store',
default=default_orion,
dest='orion',
help='Orion Context Broker endpoint')
parser.add_argument('--path',
action='store',
dest='path',
help='FIWARE Service Path')
parser.add_argument('--service',
action='store',
dest="service",
help='FIWARE Service')
args = parser.parse_args()
limit_entities = int(args.limit_entities)
limit_targets = int(args.limit_targets)
orion = args.orion
if 'path' in args:
path = args.path
if 'service' in args:
service = args.service
logger, logger_req = setup_logger()
set_event_loop_policy(EventLoopPolicy())
logger.info('Started')
res = None
if not args.import_yml:
logger.debug('Initial data collection started')
res = collect_stations()
logger.debug('Initial data collection ended')
else:
try:
with open(stations_file_yml, 'r') as file:
res = load(file)
except FileNotFoundError:
logger.error('Station file is not present')
exit(1)
if args.csv:
fieldnames = ['id', 'country', 'locality', 'latitude', 'longitude', 'timezone']
res = run(prepare_schema(res, True))
with open(stations_file_csv, 'w', encoding='utf8') as file:
writer = DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for element in res:
writer.writerow(element)
if args.yml:
with open(stations_file_yml, 'w', encoding='utf8') as file:
file.write(dump(res, indent=4, allow_unicode=True))
if not args.yml and not args.csv:
reply_status(res)
res = run(prepare_schema(res))
run(post(res))
logger.info('Ended')
exit(0)
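# --- Illustrative sketch (not part of the original harvester): the NGSI entity
# that prepare_schema_one() builds for a single station. The station id,
# locality and coordinates below are hypothetical placeholders; the structure
# follows the `template` dict above, and post_one() wraps a list of such
# entities in {'actionType': 'APPEND', 'entities': [...]} for POST /v2/op/update.
example_entity = {
    'id': 'urn:ngsi-ld:PointOfInterest:WeatherStation-PT-999',
    'type': 'PointOfInterest',
    'category': {'type': 'array', 'value': ['WeatherStation']},
    'address': {
        'type': 'PostalAddress',
        'value': {'addressCountry': 'PT', 'addressLocality': 'Example Locality'},
    },
    'location': {
        'type': 'geo:json',
        'value': {'type': 'Point', 'coordinates': [-9.13, 38.77]},
    },
    'source': {'type': 'URL', 'value': 'https://www.ipma.pt'},
}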
bobtemplates/ielectric/pyramid/+package.dottedname+/site/models.py | djowett/bobtemplates.ielectric | 0 | 6632140 | import transaction
#from sqlalchemy import Column
#from sqlalchemy import Integer
#from sqlalchemy import Unicode
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
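
# ---------------------------------------------------------------------------
# Illustrative sketch only: a hypothetical model showing how the Base and
# DBSession defined above would typically be used. The ``Page`` class and its
# columns are assumptions, not part of the generated template, which is why
# the Column / Integer / Unicode imports are left commented out above.
# ---------------------------------------------------------------------------
# from sqlalchemy import Column, Integer, Unicode
#
# class Page(Base):
#     __tablename__ = 'pages'
#
#     id = Column(Integer, primary_key=True)
#     name = Column(Unicode(100), unique=True)
#     body = Column(Unicode(100000))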
| import transaction
#from sqlalchemy import Column
#from sqlalchemy import Integer
#from sqlalchemy import Unicode
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
| es | 0.088944 | #from sqlalchemy import Column #from sqlalchemy import Integer #from sqlalchemy import Unicode | 1.832441 | 2 |
neat_yet_again/neat/relation.py | tilezen/neat | 0 | 6632141 | <gh_stars>0
# a relation in the algebraic data model
class Relation(object):
def __init__(self, attribute_names, tuples=None):
self.attribute_names = attribute_names
if tuples:
for t in tuples:
assert len(t) == len(self.attribute_names), \
"Relation tuple %r incompatible with attributes %r." \
% (t, self.attribute_names)
if isinstance(tuples, set):
self.tuples = tuples
else:
self.tuples = set(tuples)
else:
self.tuples = set()
def union(self, other):
self._assert_compatible(other, "union")
return Relation(self.attribute_names, self.tuples.union(other.tuples))
def intersection(self, other):
self._assert_compatible(other, "intersection")
return Relation(self.attribute_names, self.tuples.intersection(other.tuples))
def difference(self, other):
self._assert_compatible(other, "difference")
return Relation(self.attribute_names, self.tuples.difference(other.tuples))
def projection(self, new_attribute_names):
indices = list()
for n in new_attribute_names:
assert n in self.attribute_names, \
"Can't project attribute %r, not in relation %r" \
% (n, self.attribute_names)
indices.append(self.attribute_names.index(n))
new_tuples = set()
for t in self.tuples:
new_t = tuple([t[i] for i in indices])
new_tuples.add(new_t)
return Relation(new_attribute_names, new_tuples)
def selection(self, pred):
new_tuples = set()
for t in self.tuples:
if pred(t):
new_tuples.add(t)
return Relation(self.attribute_names, new_tuples)
def rename(self, new_attribute_names):
assert len(new_attribute_names) == len(self.attribute_names), \
"New attribute names wrong length: len(%r) != len(%r)" \
% (new_attribute_names, self.attribute_names)
return Relation(new_attribute_names, self.tuples)
def join_func(self, arg_names, result_name, func):
idx = self._indices_for(arg_names)
new_tuples = set()
for t in self.tuples:
args = [t[i] for i in idx]
result = func(*args)
new_tuples.add(tuple(list(t) + [result]))
return Relation(self.attribute_names + [result_name], new_tuples)
def natural_join(self, other):
self_attr_set = set(self.attribute_names)
other_attr_set = set(other.attribute_names)
join_attrs = self_attr_set & other_attr_set
additional_attrs = list(other_attr_set - self_attr_set)
assert len(join_attrs) > 0, \
"Natural join needs at least one shared attribute " \
"between %r and %r" \
% (self.attribute_names, other.attribute_names)
        # materialise the pairs: a bare zip() iterator would be exhausted after
        # the first pass of the inner loop in Python 3
        join_idx = list(zip(self._indices_for(join_attrs),
                            other._indices_for(join_attrs)))
additional_idx = other._indices_for(additional_attrs)
new_tuples = set()
for t1 in self.tuples:
for t2 in other.tuples:
match = True
for i1, i2 in join_idx:
if t1[i1] != t2[i2]:
match = False
break
if match:
new_t = tuple(list(t1) + [t2[i] for i in additional_idx])
new_tuples.add(new_t)
return Relation(self.attribute_names + additional_attrs, new_tuples)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.attribute_names == other.attribute_names and \
self.tuples == other.tuples
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Relation(%r, %r)" % (self.attribute_names, list(self.tuples))
def _indices_for(self, attributes):
indices = list()
for n in attributes:
indices.append(self.attribute_names.index(n))
return indices
def _assert_compatible(self, other, op_name):
assert self.attribute_names == other.attribute_names, \
"Relation attributes incompatible in %s: %r != %r" \
% (op_name, self.attribute_names, other.attribute_names)
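
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a tiny, made-up
# data set showing how the operators above compose. The relation names and
# tuples below are assumptions for demonstration purposes only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    people = Relation(["name", "city"],
                      [("ann", "oslo"), ("bob", "lima"), ("cat", "oslo")])
    cities = Relation(["city", "country"],
                      [("oslo", "norway"), ("lima", "peru")])

    # selection then projection: names of everyone living in oslo
    in_oslo = people.selection(lambda t: t[1] == "oslo").projection(["name"])
    print(in_oslo)

    # natural join on the shared "city" attribute, keeping name and country
    print(people.natural_join(cities).projection(["name", "country"]))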
| # a relation in the algebraic data model
class Relation(object):
def __init__(self, attribute_names, tuples=None):
self.attribute_names = attribute_names
if tuples:
for t in tuples:
assert len(t) == len(self.attribute_names), \
"Relation tuple %r incompatible with attributes %r." \
% (t, self.attribute_names)
if isinstance(tuples, set):
self.tuples = tuples
else:
self.tuples = set(tuples)
else:
self.tuples = set()
def union(self, other):
self._assert_compatible(other, "union")
return Relation(self.attribute_names, self.tuples.union(other.tuples))
def intersection(self, other):
self._assert_compatible(other, "intersection")
return Relation(self.attribute_names, self.tuples.intersection(other.tuples))
def difference(self, other):
self._assert_compatible(other, "difference")
return Relation(self.attribute_names, self.tuples.difference(other.tuples))
def projection(self, new_attribute_names):
indices = list()
for n in new_attribute_names:
assert n in self.attribute_names, \
"Can't project attribute %r, not in relation %r" \
% (n, self.attribute_names)
indices.append(self.attribute_names.index(n))
new_tuples = set()
for t in self.tuples:
new_t = tuple([t[i] for i in indices])
new_tuples.add(new_t)
return Relation(new_attribute_names, new_tuples)
def selection(self, pred):
new_tuples = set()
for t in self.tuples:
if pred(t):
new_tuples.add(t)
return Relation(self.attribute_names, new_tuples)
def rename(self, new_attribute_names):
assert len(new_attribute_names) == len(self.attribute_names), \
"New attribute names wrong length: len(%r) != len(%r)" \
% (new_attribute_names, self.attribute_names)
return Relation(new_attribute_names, self.tuples)
def join_func(self, arg_names, result_name, func):
idx = self._indices_for(arg_names)
new_tuples = set()
for t in self.tuples:
args = [t[i] for i in idx]
result = func(*args)
new_tuples.add(tuple(list(t) + [result]))
return Relation(self.attribute_names + [result_name], new_tuples)
def natural_join(self, other):
self_attr_set = set(self.attribute_names)
other_attr_set = set(other.attribute_names)
join_attrs = self_attr_set & other_attr_set
additional_attrs = list(other_attr_set - self_attr_set)
assert len(join_attrs) > 0, \
"Natural join needs at least one shared attribute " \
"between %r and %r" \
% (self.attribute_names, other.attribute_names)
        # materialise the pairs: a bare zip() iterator would be exhausted after
        # the first pass of the inner loop in Python 3
        join_idx = list(zip(self._indices_for(join_attrs),
                            other._indices_for(join_attrs)))
additional_idx = other._indices_for(additional_attrs)
new_tuples = set()
for t1 in self.tuples:
for t2 in other.tuples:
match = True
for i1, i2 in join_idx:
if t1[i1] != t2[i2]:
match = False
break
if match:
new_t = tuple(list(t1) + [t2[i] for i in additional_idx])
new_tuples.add(new_t)
return Relation(self.attribute_names + additional_attrs, new_tuples)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.attribute_names == other.attribute_names and \
self.tuples == other.tuples
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Relation(%r, %r)" % (self.attribute_names, list(self.tuples))
def _indices_for(self, attributes):
indices = list()
for n in attributes:
indices.append(self.attribute_names.index(n))
return indices
def _assert_compatible(self, other, op_name):
assert self.attribute_names == other.attribute_names, \
"Relation attributes incompatible in %s: %r != %r" \
% (op_name, self.attribute_names, other.attribute_names) | en | 0.888294 | # a relation in the algebraic data model | 3.48145 | 3 |
crud.py | HB-LAND/pour-decisions | 3 | 6632142 | <gh_stars>1-10
"""CRUD operations."""
from model import db, connect_to_db, User, Wine, Cheese, Pair, Rating
def create_user(fname, lname, email, password):
"""Create and return a new user"""
user = User(fname=fname, lname=lname, email=email, password=password)
db.session.add(user)
db.session.commit()
return user
def create_wine(wine_name, wine_pronunciation, wine_color, wine_sparkling,
wine_region, wine_country, wine_bio, wine_img, wine_sub):
"""Create and return a wine."""
wine = Wine(wine_name=wine_name,
wine_pronunciation=wine_pronunciation,
wine_color=wine_color,
wine_sparkling=wine_sparkling,
wine_region=wine_region,
wine_country=wine_country,
wine_bio=wine_bio,
wine_img=wine_img,
wine_sub=wine_sub)
db.session.add(wine)
db.session.commit()
return wine
def create_cheese(cheese_name, cheese_pronunciation, cheese_region, cheese_density,
cheese_description, cheese_bio, cheese_animal, cheese_img,
cheese_sub):
"""Create and return a cheese."""
cheese = Cheese(cheese_name=cheese_name, cheese_pronunciation=cheese_pronunciation,
cheese_region=cheese_region, cheese_density=cheese_density,
cheese_description=cheese_description, cheese_bio=cheese_bio,
cheese_animal=cheese_animal, cheese_img=cheese_img,
cheese_sub=cheese_sub)
db.session.add(cheese)
db.session.commit()
return cheese
def create_pair(user_id, wine_id, cheese_id):
"""Creates a paired wine and cheese based on a user's selection"""
pair = Pair(user_id=user_id,
wine_id=wine_id,
cheese_id=cheese_id)
db.session.add(pair)
db.session.commit()
return pair
def create_rating(pair_id, user_id, pair_rating):
"""Creates a rating based on a user's like or dislike of a wine and cheese pairing"""
    rating = Rating(pair_id=pair_id,
                    user_id=user_id,
                    pair_rating=pair_rating)
    db.session.add(rating)
    db.session.commit()

    return rating
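
def _example_pairing_flow():
    """Illustrative sketch only (not part of the original app): a rough idea
    of how the helpers above chain together once the database is connected.
    The argument values and the ``user_id`` / ``wine_id`` / ``cheese_id`` /
    ``pair_id`` attribute names are assumptions for demonstration."""

    user = create_user('Ada', 'Lovelace', 'ada@example.com', 'hunter2')
    wine = create_wine('Gamay', 'ga-MAY', 'red', False,
                       'Beaujolais', 'France', 'A light-bodied red.',
                       'gamay.jpg', 'beaujolais')
    cheese = create_cheese('Comte', 'kon-TAY', 'Jura', 'hard',
                           'Nutty alpine cheese.', 'Aged for 12 months.',
                           'cow', 'comte.jpg', 'alpine')
    pair = create_pair(user.user_id, wine.wine_id, cheese.cheese_id)
    return create_rating(pair.pair_id, user.user_id, True)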
if __name__ == '__main__':
from server import app
connect_to_db(app)
| """CRUD operations."""
from model import db, connect_to_db, User, Wine, Cheese, Pair, Rating
def create_user(fname, lname, email, password):
"""Create and return a new user"""
user = User(fname=fname, lname=lname, email=email, password=password)
db.session.add(user)
db.session.commit()
return user
def create_wine(wine_name, wine_pronunciation, wine_color, wine_sparkling,
wine_region, wine_country, wine_bio, wine_img, wine_sub):
"""Create and return a wine."""
wine = Wine(wine_name=wine_name,
wine_pronunciation=wine_pronunciation,
wine_color=wine_color,
wine_sparkling=wine_sparkling,
wine_region=wine_region,
wine_country=wine_country,
wine_bio=wine_bio,
wine_img=wine_img,
wine_sub=wine_sub)
db.session.add(wine)
db.session.commit()
return wine
def create_cheese(cheese_name, cheese_pronunciation, cheese_region, cheese_density,
cheese_description, cheese_bio, cheese_animal, cheese_img,
cheese_sub):
"""Create and return a cheese."""
cheese = Cheese(cheese_name=cheese_name, cheese_pronunciation=cheese_pronunciation,
cheese_region=cheese_region, cheese_density=cheese_density,
cheese_description=cheese_description, cheese_bio=cheese_bio,
cheese_animal=cheese_animal, cheese_img=cheese_img,
cheese_sub=cheese_sub)
db.session.add(cheese)
db.session.commit()
return cheese
def create_pair(user_id, wine_id, cheese_id):
"""Creates a paired wine and cheese based on a user's selection"""
pair = Pair(user_id=user_id,
wine_id=wine_id,
cheese_id=cheese_id)
db.session.add(pair)
db.session.commit()
return pair
def create_rating(pair_id, user_id, pair_rating):
"""Creates a rating based on a user's like or dislike of a wine and cheese pairing"""
    rating = Rating(pair_id=pair_id,
                    user_id=user_id,
                    pair_rating=pair_rating)
    db.session.add(rating)
    db.session.commit()

    return rating
if __name__ == '__main__':
from server import app
connect_to_db(app) | en | 0.927473 | CRUD operations. Create and return a new user Create and return a wine. Create and return a cheese. Creates a paired wine and cheese based on a user's selection Creates a rating based on a user's like or dislike of a wine and cheese pairing | 3.561166 | 4 |
setup.py | zedshaw/zapps | 2 | 6632143 | ## this file is generated from settings in build.vel
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# from options["setup"] in build.vel
config = {'description': 'A book compiler for programmers where code and prose are separate.', 'author': '<NAME>', 'author_email': '<EMAIL>', 'url': 'http://www.zedshaw.com/projects/zapps', 'version': '0.5', 'scripts': ['bin/zapps'], 'packages': ['zapps'], 'name': 'zapps'}
setup(**config)
| ## this file is generated from settings in build.vel
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# from options["setup"] in build.vel
config = {'description': 'A book compiler for programmers where code and prose are separate.', 'author': '<NAME>', 'author_email': '<EMAIL>', 'url': 'http://www.zedshaw.com/projects/zapps', 'version': '0.5', 'scripts': ['bin/zapps'], 'packages': ['zapps'], 'name': 'zapps'}
setup(**config)
| en | 0.941835 | ## this file is generated from settings in build.vel # from options["setup"] in build.vel | 1.352238 | 1 |
setup.py | RichardScottOZ/subsurface | 1 | 6632144 | # -*- coding: utf 8 -*-
"""
Python installation file.
"""
from os import path
from setuptools import setup, find_packages
import re
this_directory = path.abspath(path.dirname(__file__))
verstr = '0.1.0'
VERSIONFILE = path.join(this_directory, "subsurface", "_version.py")
with open(VERSIONFILE, 'r', encoding='utf-8')as f:
verstrline = f.read().strip()
pattern = re.compile(r"__version__ = ['\"](.*)['\"]")
mo = pattern.search(verstrline)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
DESCRIPTION_FILE = path.join(this_directory, 'README.md')
with open(DESCRIPTION_FILE, 'r', encoding='utf-8') as f:
long_description = f.read()
REQUIREMENTS = ['numpy',
]
TEST_REQUIREMENTS = ['pytest',
]
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
]
setup(name='subsurface',
version=verstr,
packages=find_packages(exclude=('tests', 'docs')),
description='Subsurface data types and utilities',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://softwareunderground.org',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2',
tests_require=TEST_REQUIREMENTS,
install_requires=REQUIREMENTS,
classifiers=CLASSIFIERS,
zip_safe=False,
)
| # -*- coding: utf 8 -*-
"""
Python installation file.
"""
from os import path
from setuptools import setup, find_packages
import re
this_directory = path.abspath(path.dirname(__file__))
verstr = '0.1.0'
VERSIONFILE = path.join(this_directory, "subsurface", "_version.py")
with open(VERSIONFILE, 'r', encoding='utf-8')as f:
verstrline = f.read().strip()
pattern = re.compile(r"__version__ = ['\"](.*)['\"]")
mo = pattern.search(verstrline)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
DESCRIPTION_FILE = path.join(this_directory, 'README.md')
with open(DESCRIPTION_FILE, 'r', encoding='utf-8') as f:
long_description = f.read()
REQUIREMENTS = ['numpy',
]
TEST_REQUIREMENTS = ['pytest',
]
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
]
setup(name='subsurface',
version=verstr,
packages=find_packages(exclude=('tests', 'docs')),
description='Subsurface data types and utilities',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://softwareunderground.org',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2',
tests_require=TEST_REQUIREMENTS,
install_requires=REQUIREMENTS,
classifiers=CLASSIFIERS,
zip_safe=False,
)
| en | 0.61688 | # -*- coding: utf 8 -*- Python installation file. | 1.755898 | 2 |
Shivarj-Jadhav/code.py | Shivraj-Jadhav/greyatom-python-for-data-science | 0 | 6632145 | <gh_stars>0
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data= pd.read_csv(path)
loan_status=data['Loan_Status'].value_counts()
plt.bar(loan_status.index, loan_status)
plt.show()
#Code starts here
# --------------
#Code starts here
property_and_loan=data.groupby(['Property_Area','Loan_Status'])
property_and_loan=property_and_loan.size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,20))
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
education_and_loan = data.groupby(['Education','Loan_Status'])
education_and_loan = education_and_loan.size().unstack()
education_and_loan.plot(kind='bar', stacked=True, figsize=(15,20))
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
graduate=data[data['Education']=='Graduate']
not_graduate=data[data['Education']=='Not Graduate']
graduate['LoanAmount'].plot(kind='density', label='Graduate')
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
#For automatic legend display
plt.legend()
# --------------
#Code starts here
fig, (ax_1, ax_2, ax_3) = plt.subplots(1, 3, figsize=(20, 8))

# plot the actual columns rather than bare string literals
ax_1.scatter(data['ApplicantIncome'], data['LoanAmount'])
ax_1.set(title='Applicant Income')

ax_2.scatter(data['CoapplicantIncome'], data['LoanAmount'])
ax_2.set(title='Coapplicant Income')

data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'], data['LoanAmount'])
ax_3.set(title='Total Income')
| # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data= pd.read_csv(path)
loan_status=data['Loan_Status'].value_counts()
plt.bar(loan_status.index, loan_status)
plt.show()
#Code starts here
# --------------
#Code starts here
property_and_loan=data.groupby(['Property_Area','Loan_Status'])
property_and_loan=property_and_loan.size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,20))
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
education_and_loan = data.groupby(['Education','Loan_Status'])
education_and_loan = education_and_loan.size().unstack()
education_and_loan.plot(kind='bar', stacked=True, figsize=(15,20))
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
graduate=data[data['Education']=='Graduate']
not_graduate=data[data['Education']=='Not Graduate']
graduate['LoanAmount'].plot(kind='density', label='Graduate')
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
#For automatic legend display
plt.legend()
# --------------
#Code starts here
fig, (ax_1, ax_2, ax_3) = plt.subplots(1, 3, figsize=(20, 8))
# plot the actual columns rather than bare string literals
ax_1.scatter(data['ApplicantIncome'], data['LoanAmount'])
ax_1.set(title='Applicant Income')
ax_2.scatter(data['CoapplicantIncome'], data['LoanAmount'])
ax_2.set(title='Coapplicant Income')
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'], data['LoanAmount'])
ax_3.set(title='Total Income') | en | 0.360972 | # -------------- #Importing header files #Code starts here # -------------- #Code starts here # -------------- #Code starts here # -------------- #Code starts here #For automatic legend display # -------------- #Code starts here | 2.95295 | 3 |
hubspot/crm/timeline/api/events_api.py | fakepop/hubspot-api-python | 0 | 6632146 | <filename>hubspot/crm/timeline/api/events_api.py
# coding: utf-8
"""
Timeline events
This feature allows an app to create and configure custom events that can show up in the timelines of certain CRM objects like contacts, companies, tickets, or deals. You'll find multiple use cases for this API in the sections below. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.timeline.api_client import ApiClient
from hubspot.crm.timeline.exceptions import ApiTypeError, ApiValueError # noqa: F401
class EventsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create(self, timeline_event, **kwargs): # noqa: E501
"""Create a single event # noqa: E501
        Creates an instance of a timeline event based on an event template. Once created, this event is immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create(timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param TimelineEvent timeline_event: The timeline event definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TimelineEventResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_with_http_info(timeline_event, **kwargs) # noqa: E501
def create_with_http_info(self, timeline_event, **kwargs): # noqa: E501
"""Create a single event # noqa: E501
        Creates an instance of a timeline event based on an event template. Once created, this event is immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_with_http_info(timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param TimelineEvent timeline_event: The timeline event definition. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TimelineEventResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["timeline_event"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method create" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'timeline_event' is set
if self.api_client.client_side_validation and (
"timeline_event" not in local_var_params
or local_var_params["timeline_event"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `timeline_event` when calling `create`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "timeline_event" in local_var_params:
body_params = local_var_params["timeline_event"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimelineEventResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def create_batch(self, batch_input_timeline_event, **kwargs): # noqa: E501
"""Creates multiple events # noqa: E501
        Creates multiple instances of timeline events based on an event template. Once created, these events are immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_batch(batch_input_timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param BatchInputTimelineEvent batch_input_timeline_event: The timeline event definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: BatchResponseTimelineEventResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_batch_with_http_info(
batch_input_timeline_event, **kwargs
) # noqa: E501
def create_batch_with_http_info(
self, batch_input_timeline_event, **kwargs
): # noqa: E501
"""Creates multiple events # noqa: E501
        Creates multiple instances of timeline events based on an event template. Once created, these events are immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_batch_with_http_info(batch_input_timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param BatchInputTimelineEvent batch_input_timeline_event: The timeline event definition. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(BatchResponseTimelineEventResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["batch_input_timeline_event"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_batch" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'batch_input_timeline_event' is set
if self.api_client.client_side_validation and (
"batch_input_timeline_event" not in local_var_params
or local_var_params["batch_input_timeline_event"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `batch_input_timeline_event` when calling `create_batch`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "batch_input_timeline_event" in local_var_params:
body_params = local_var_params["batch_input_timeline_event"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/batch/create",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="BatchResponseTimelineEventResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_by_id(self, event_template_id, event_id, **kwargs): # noqa: E501
"""Gets the event # noqa: E501
This returns the previously created event. It contains all existing info for the event, but not necessarily the CRM object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_by_id(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TimelineEventResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_by_id_with_http_info(
event_template_id, event_id, **kwargs
) # noqa: E501
def get_by_id_with_http_info(
self, event_template_id, event_id, **kwargs
): # noqa: E501
"""Gets the event # noqa: E501
This returns the previously created event. It contains all existing info for the event, but not necessarily the CRM object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_by_id_with_http_info(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TimelineEventResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["event_template_id", "event_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'event_template_id' is set
if self.api_client.client_side_validation and (
"event_template_id" not in local_var_params
or local_var_params["event_template_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_template_id` when calling `get_by_id`"
) # noqa: E501
# verify the required parameter 'event_id' is set
if self.api_client.client_side_validation and (
"event_id" not in local_var_params
or local_var_params["event_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_id` when calling `get_by_id`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "event_template_id" in local_var_params:
path_params["eventTemplateId"] = local_var_params[
"event_template_id"
] # noqa: E501
if "event_id" in local_var_params:
path_params["eventId"] = local_var_params["event_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/{eventTemplateId}/{eventId}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimelineEventResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_detail_by_id(self, event_template_id, event_id, **kwargs): # noqa: E501
"""Gets the detailTemplate as rendered # noqa: E501
This will take the `detailTemplate` from the event template and return an object rendering the specified event. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_detail_by_id(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_detail_by_id_with_http_info(
event_template_id, event_id, **kwargs
) # noqa: E501
def get_detail_by_id_with_http_info(
self, event_template_id, event_id, **kwargs
): # noqa: E501
"""Gets the detailTemplate as rendered # noqa: E501
This will take the `detailTemplate` from the event template and return an object rendering the specified event. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_detail_by_id_with_http_info(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventDetail, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["event_template_id", "event_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_detail_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'event_template_id' is set
if self.api_client.client_side_validation and (
"event_template_id" not in local_var_params
or local_var_params["event_template_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_template_id` when calling `get_detail_by_id`"
) # noqa: E501
# verify the required parameter 'event_id' is set
if self.api_client.client_side_validation and (
"event_id" not in local_var_params
or local_var_params["event_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_id` when calling `get_detail_by_id`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "event_template_id" in local_var_params:
path_params["eventTemplateId"] = local_var_params[
"event_template_id"
] # noqa: E501
if "event_id" in local_var_params:
path_params["eventId"] = local_var_params["event_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/{eventTemplateId}/{eventId}/detail",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EventDetail", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_render_by_id(self, event_template_id, event_id, **kwargs): # noqa: E501
"""Renders the header or detail as HTML # noqa: E501
This will take either the `headerTemplate` or `detailTemplate` from the event template and render for the specified event as HTML. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_render_by_id(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
        :param bool detail: Set to 'true' to render the `detailTemplate` instead of the `headerTemplate`.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_render_by_id_with_http_info(
event_template_id, event_id, **kwargs
) # noqa: E501
def get_render_by_id_with_http_info(
self, event_template_id, event_id, **kwargs
): # noqa: E501
"""Renders the header or detail as HTML # noqa: E501
This will take either the `headerTemplate` or `detailTemplate` from the event template and render for the specified event as HTML. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_render_by_id_with_http_info(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
        :param bool detail: Set to 'true' to render the `detailTemplate` instead of the `headerTemplate`.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["event_template_id", "event_id", "detail"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_render_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'event_template_id' is set
if self.api_client.client_side_validation and (
"event_template_id" not in local_var_params
or local_var_params["event_template_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_template_id` when calling `get_render_by_id`"
) # noqa: E501
# verify the required parameter 'event_id' is set
if self.api_client.client_side_validation and (
"event_id" not in local_var_params
or local_var_params["event_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_id` when calling `get_render_by_id`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "event_template_id" in local_var_params:
path_params["eventTemplateId"] = local_var_params[
"event_template_id"
] # noqa: E501
if "event_id" in local_var_params:
path_params["eventId"] = local_var_params["event_id"] # noqa: E501
query_params = []
if (
"detail" in local_var_params and local_var_params["detail"] is not None
): # noqa: E501
query_params.append(("detail", local_var_params["detail"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["text/html", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/{eventTemplateId}/{eventId}/render",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="str", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| <filename>hubspot/crm/timeline/api/events_api.py
# coding: utf-8
"""
Timeline events
This feature allows an app to create and configure custom events that can show up in the timelines of certain CRM objects like contacts, companies, tickets, or deals. You'll find multiple use cases for this API in the sections below. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.timeline.api_client import ApiClient
from hubspot.crm.timeline.exceptions import ApiTypeError, ApiValueError # noqa: F401
class EventsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create(self, timeline_event, **kwargs): # noqa: E501
"""Create a single event # noqa: E501
        Creates an instance of a timeline event based on an event template. Once created, this event is immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create(timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param TimelineEvent timeline_event: The timeline event definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TimelineEventResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_with_http_info(timeline_event, **kwargs) # noqa: E501
def create_with_http_info(self, timeline_event, **kwargs): # noqa: E501
"""Create a single event # noqa: E501
        Creates an instance of a timeline event based on an event template. Once created, this event is immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_with_http_info(timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param TimelineEvent timeline_event: The timeline event definition. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TimelineEventResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["timeline_event"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method create" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'timeline_event' is set
if self.api_client.client_side_validation and (
"timeline_event" not in local_var_params
or local_var_params["timeline_event"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `timeline_event` when calling `create`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "timeline_event" in local_var_params:
body_params = local_var_params["timeline_event"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimelineEventResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def create_batch(self, batch_input_timeline_event, **kwargs): # noqa: E501
"""Creates multiple events # noqa: E501
        Creates multiple instances of timeline events based on an event template. Once created, these events are immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_batch(batch_input_timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param BatchInputTimelineEvent batch_input_timeline_event: The timeline event definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: BatchResponseTimelineEventResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_batch_with_http_info(
batch_input_timeline_event, **kwargs
) # noqa: E501
def create_batch_with_http_info(
self, batch_input_timeline_event, **kwargs
): # noqa: E501
"""Creates multiple events # noqa: E501
        Creates multiple instances of timeline events based on an event template. Once created, these events are immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to update those properties, or add them if they don't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_batch_with_http_info(batch_input_timeline_event, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param BatchInputTimelineEvent batch_input_timeline_event: The timeline event definition. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(BatchResponseTimelineEventResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["batch_input_timeline_event"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_batch" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'batch_input_timeline_event' is set
if self.api_client.client_side_validation and (
"batch_input_timeline_event" not in local_var_params
or local_var_params["batch_input_timeline_event"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `batch_input_timeline_event` when calling `create_batch`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "batch_input_timeline_event" in local_var_params:
body_params = local_var_params["batch_input_timeline_event"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/batch/create",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="BatchResponseTimelineEventResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_by_id(self, event_template_id, event_id, **kwargs): # noqa: E501
"""Gets the event # noqa: E501
This returns the previously created event. It contains all existing info for the event, but not necessarily the CRM object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_by_id(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TimelineEventResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_by_id_with_http_info(
event_template_id, event_id, **kwargs
) # noqa: E501
def get_by_id_with_http_info(
self, event_template_id, event_id, **kwargs
): # noqa: E501
"""Gets the event # noqa: E501
This returns the previously created event. It contains all existing info for the event, but not necessarily the CRM object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_by_id_with_http_info(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TimelineEventResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["event_template_id", "event_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'event_template_id' is set
if self.api_client.client_side_validation and (
"event_template_id" not in local_var_params
or local_var_params["event_template_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_template_id` when calling `get_by_id`"
) # noqa: E501
# verify the required parameter 'event_id' is set
if self.api_client.client_side_validation and (
"event_id" not in local_var_params
or local_var_params["event_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_id` when calling `get_by_id`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "event_template_id" in local_var_params:
path_params["eventTemplateId"] = local_var_params[
"event_template_id"
] # noqa: E501
if "event_id" in local_var_params:
path_params["eventId"] = local_var_params["event_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/{eventTemplateId}/{eventId}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimelineEventResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_detail_by_id(self, event_template_id, event_id, **kwargs): # noqa: E501
"""Gets the detailTemplate as rendered # noqa: E501
This will take the `detailTemplate` from the event template and return an object rendering the specified event. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_detail_by_id(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_detail_by_id_with_http_info(
event_template_id, event_id, **kwargs
) # noqa: E501
def get_detail_by_id_with_http_info(
self, event_template_id, event_id, **kwargs
): # noqa: E501
"""Gets the detailTemplate as rendered # noqa: E501
This will take the `detailTemplate` from the event template and return an object rendering the specified event. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_detail_by_id_with_http_info(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventDetail, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["event_template_id", "event_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_detail_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'event_template_id' is set
if self.api_client.client_side_validation and (
"event_template_id" not in local_var_params
or local_var_params["event_template_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_template_id` when calling `get_detail_by_id`"
) # noqa: E501
# verify the required parameter 'event_id' is set
if self.api_client.client_side_validation and (
"event_id" not in local_var_params
or local_var_params["event_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_id` when calling `get_detail_by_id`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "event_template_id" in local_var_params:
path_params["eventTemplateId"] = local_var_params[
"event_template_id"
] # noqa: E501
if "event_id" in local_var_params:
path_params["eventId"] = local_var_params["event_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/{eventTemplateId}/{eventId}/detail",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EventDetail", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_render_by_id(self, event_template_id, event_id, **kwargs): # noqa: E501
"""Renders the header or detail as HTML # noqa: E501
This will take either the `headerTemplate` or `detailTemplate` from the event template and render for the specified event as HTML. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_render_by_id(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
        :param bool detail: Set to 'true' to render the `detailTemplate` instead of the `headerTemplate`.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_render_by_id_with_http_info(
event_template_id, event_id, **kwargs
) # noqa: E501
def get_render_by_id_with_http_info(
self, event_template_id, event_id, **kwargs
): # noqa: E501
"""Renders the header or detail as HTML # noqa: E501
This will take either the `headerTemplate` or `detailTemplate` from the event template and render for the specified event as HTML. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_render_by_id_with_http_info(event_template_id, event_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param str event_id: The event ID. (required)
        :param bool detail: Set to 'true' to render the `detailTemplate` instead of the `headerTemplate`.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["event_template_id", "event_id", "detail"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_render_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'event_template_id' is set
if self.api_client.client_side_validation and (
"event_template_id" not in local_var_params
or local_var_params["event_template_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_template_id` when calling `get_render_by_id`"
) # noqa: E501
# verify the required parameter 'event_id' is set
if self.api_client.client_side_validation and (
"event_id" not in local_var_params
or local_var_params["event_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `event_id` when calling `get_render_by_id`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "event_template_id" in local_var_params:
path_params["eventTemplateId"] = local_var_params[
"event_template_id"
] # noqa: E501
if "event_id" in local_var_params:
path_params["eventId"] = local_var_params["event_id"] # noqa: E501
query_params = []
if (
"detail" in local_var_params and local_var_params["detail"] is not None
): # noqa: E501
query_params.append(("detail", local_var_params["detail"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["text/html", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["oauth2"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/timeline/events/{eventTemplateId}/{eventId}/render",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="str", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| en | 0.698287 | # coding: utf-8 Timeline events This feature allows an app to create and configure custom events that can show up in the timelines of certain CRM objects like contacts, companies, tickets, or deals. You'll find multiple use cases for this API in the sections below. # noqa: E501 The version of the OpenAPI document: v3 Generated by: https://openapi-generator.tech # noqa: F401 # python 2 and python 3 compatibility library # noqa: F401 NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. # noqa: E501 Create a single event # noqa: E501 Creates an instance of a timeline event based on an event template. Once created, this event is immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to updates those properties, or add them if they don't exist. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create(timeline_event, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param TimelineEvent timeline_event: The timeline event definition. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TimelineEventResponse If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 Create a single event # noqa: E501 Creates an instance of a timeline event based on an event template. Once created, this event is immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to updates those properties, or add them if they don't exist. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_with_http_info(timeline_event, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param TimelineEvent timeline_event: The timeline event definition. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TimelineEventResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. # verify the required parameter 'timeline_event' is set # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # HTTP header `Content-Type` # noqa: E501 # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Creates multiple events # noqa: E501 Creates multiple instances of timeline events based on an event template. Once created, these event are immutable on the object timeline and cannot be modified. 
If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to updates those properties, or add them if they don't exist. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_batch(batch_input_timeline_event, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param BatchInputTimelineEvent batch_input_timeline_event: The timeline event definition. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: BatchResponseTimelineEventResponse If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 Creates multiple events # noqa: E501 Creates multiple instances of timeline events based on an event template. Once created, these event are immutable on the object timeline and cannot be modified. If the event template was configured to update object properties via `objectPropertyName`, this call will also attempt to updates those properties, or add them if they don't exist. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_batch_with_http_info(batch_input_timeline_event, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param BatchInputTimelineEvent batch_input_timeline_event: The timeline event definition. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(BatchResponseTimelineEventResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. # verify the required parameter 'batch_input_timeline_event' is set # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # HTTP header `Content-Type` # noqa: E501 # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Gets the event # noqa: E501 This returns the previously created event. It contains all existing info for the event, but not necessarily the CRM object. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_by_id(event_template_id, event_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str event_template_id: The event template ID. (required) :param str event_id: The event ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:return: TimelineEventResponse If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 Gets the event # noqa: E501 This returns the previously created event. It contains all existing info for the event, but not necessarily the CRM object. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_by_id_with_http_info(event_template_id, event_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str event_template_id: The event template ID. (required) :param str event_id: The event ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TimelineEventResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. # verify the required parameter 'event_template_id' is set # noqa: E501 # noqa: E501 # noqa: E501 # verify the required parameter 'event_id' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Gets the detailTemplate as rendered # noqa: E501 This will take the `detailTemplate` from the event template and return an object rendering the specified event. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_detail_by_id(event_template_id, event_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str event_template_id: The event template ID. (required) :param str event_id: The event ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: EventDetail If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 Gets the detailTemplate as rendered # noqa: E501 This will take the `detailTemplate` from the event template and return an object rendering the specified event. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_detail_by_id_with_http_info(event_template_id, event_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str event_template_id: The event template ID. (required) :param str event_id: The event ID. 
(required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(EventDetail, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. # verify the required parameter 'event_template_id' is set # noqa: E501 # noqa: E501 # noqa: E501 # verify the required parameter 'event_id' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Renders the header or detail as HTML # noqa: E501 This will take either the `headerTemplate` or `detailTemplate` from the event template and render for the specified event as HTML. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_render_by_id(event_template_id, event_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str event_template_id: The event template ID. (required) :param str event_id: The event ID. (required) :param bool detail: Set to 'true', we want to render the `detailTemplate` instead of the `headerTemplate`. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: str If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 Renders the header or detail as HTML # noqa: E501 This will take either the `headerTemplate` or `detailTemplate` from the event template and render for the specified event as HTML. If the template references `extraData` that isn't found in the event, it will be ignored and we'll render without it. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_render_by_id_with_http_info(event_template_id, event_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str event_template_id: The event template ID. (required) :param str event_id: The event ID. (required) :param bool detail: Set to 'true', we want to render the `detailTemplate` instead of the `headerTemplate`. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(str, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
# verify the required parameter 'event_template_id' is set # noqa: E501 # noqa: E501 # noqa: E501 # verify the required parameter 'event_id' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 # noqa: E501 | 2.340574 | 2 |
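# --- Editor's illustrative sketch (not part of the corpus row above) ---
# A hedged example of driving the generated timeline-events client shown above. The
# concrete API wrapper object and the BatchInputTimelineEvent payload are passed in as
# parameters because their import paths are not visible in this excerpt; only the
# method names and signatures (`create_batch`, `get_by_id`, `get_render_by_id`, and
# `async_req=True` returning a thread-like object whose `.get()` yields the result)
# are taken from the generated code itself.
def demo_timeline_events(events_api, batch_input_timeline_event):
    # Batch-create immutable timeline events from an event template.
    created = events_api.create_batch(batch_input_timeline_event)

    # Fetch a single previously created event synchronously.
    event = events_api.get_by_id("my-template-id", "my-event-id")

    # Render the detailTemplate as HTML, asynchronously.
    thread = events_api.get_render_by_id(
        "my-template-id", "my-event-id", detail=True, async_req=True
    )
    html = thread.get()
    return created, event, html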
example/random_.py | iamaris/ppf | 2 | 6632147 | import random, sys, getopt
def _print_gauss():
g = random.Random(1234)
print [g.gauss(mu = 0, sigma = 1) for i in range(100)]
def _print_lognormal_variate():
g = random.Random(1234)
print [g.lognormvariate(mu = 0, sigma = 1) for i in range(100)]
def _usage():
print "usage: %s" % sys.argv[0]
print "Try `python %s -h' for more information." % sys.argv[0]
def _help():
print "usage: %s" % sys.argv[0]
print "-h (--help) : print this help message and exit"
print "-v (--version) : print the version number and exit"
if __name__ == '__main__':
try:
opts, args, = getopt.getopt(sys.argv[1:], "vh", ["version", "help", ])
except getopt.GetoptError:
_usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
_help()
sys.exit()
if o in ("-v", "--version"):
print "'%s', Version 0.0.0" % sys.argv[0]
sys.exit()
_print_gauss()
_print_lognormal_variate()
| import random, sys, getopt
def _print_gauss():
g = random.Random(1234)
print [g.gauss(mu = 0, sigma = 1) for i in range(100)]
def _print_lognormal_variate():
g = random.Random(1234)
print [g.lognormvariate(mu = 0, sigma = 1) for i in range(100)]
def _usage():
print "usage: %s" % sys.argv[0]
print "Try `python %s -h' for more information." % sys.argv[0]
def _help():
print "usage: %s" % sys.argv[0]
print "-h (--help) : print this help message and exit"
print "-v (--version) : print the version number and exit"
if __name__ == '__main__':
try:
opts, args, = getopt.getopt(sys.argv[1:], "vh", ["version", "help", ])
except getopt.GetoptError:
_usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
_help()
sys.exit()
if o in ("-v", "--version"):
print "'%s', Version 0.0.0" % sys.argv[0]
sys.exit()
_print_gauss()
_print_lognormal_variate()
| none | 1 | 2.682816 | 3 |
|
src/alembic_utils/depends.py | tdamsma/alembic_utils | 74 | 6632148 | import logging
from contextlib import contextmanager
from typing import Generator, List
from sqlalchemy import exc as sqla_exc
from sqlalchemy.orm import Session
from alembic_utils.simulate import simulate_entity
logger = logging.getLogger(__name__)
def solve_resolution_order(sess: Session, entities):
"""Solve for an entity resolution order that increases the probability that
    a migration will succeed if, for example, two new views are created and one
refers to the other
This strategy will only solve for simple cases
"""
resolved = []
# Resolve the entities with 0 dependencies first (faster)
logger.info("Resolving entities with no dependencies")
for entity in entities:
try:
with simulate_entity(sess, entity):
resolved.append(entity)
except (sqla_exc.ProgrammingError, sqla_exc.InternalError) as exc:
continue
# Resolve entities with possible dependencies
for _ in range(len(entities)):
logger.info("Resolving entities with dependencies. This may take a minute")
n_resolved = len(resolved)
for entity in entities:
if entity in resolved:
continue
try:
with simulate_entity(sess, entity, dependencies=resolved):
resolved.append(entity)
except (sqla_exc.ProgrammingError, sqla_exc.InternalError):
continue
if len(resolved) == n_resolved:
# No new entities resolved in the last iteration. Exit
break
for entity in entities:
if entity not in resolved:
resolved.append(entity)
return resolved
@contextmanager
def recreate_dropped(connection) -> Generator[Session, None, None]:
    """Recreate any ReplaceableEntities that were dropped within the block
This is useful for making cascading updates. For example, updating a table's column type when it has dependent views.
def upgrade() -> None:
my_view = PGView(...)
with recreate_dropped(op.get_bind()) as conn:
op.drop_entity(my_view)
# change an integer column to a bigint
            op.alter_column(
                table_name="account",
                column_name="id",
                schema="public",
                type_=sa.BIGINT(),
existing_type=sa.Integer(),
)
"""
from alembic_utils.pg_function import PGFunction
from alembic_utils.pg_materialized_view import PGMaterializedView
from alembic_utils.pg_trigger import PGTrigger
from alembic_utils.pg_view import PGView
from alembic_utils.replaceable_entity import ReplaceableEntity
# Do not include permissions here e.g. PGGrantTable. If columns granted to users are dropped, it will cause an error
def collect_all_db_entities(sess: Session) -> List[ReplaceableEntity]:
"""Collect all entities from the database"""
return [
*PGFunction.from_database(sess, "%"),
*PGTrigger.from_database(sess, "%"),
*PGView.from_database(sess, "%"),
*PGMaterializedView.from_database(sess, "%"),
]
sess = Session(bind=connection)
# All existing entities, before the upgrade
before = collect_all_db_entities(sess)
# In the yield, do a
# op.drop_entity(my_mat_view, cascade=True)
# op.create_entity(my_mat_view)
try:
yield sess
except:
sess.rollback()
raise
# All existing entities, after the upgrade
after = collect_all_db_entities(sess)
after_identities = {x.identity for x in after}
# Entities that were not impacted, or that we have "recovered"
resolved = []
unresolved = []
# First, ignore the ones that were not impacted by the upgrade
for ent in before:
if ent.identity in after_identities:
resolved.append(ent)
else:
unresolved.append(ent)
# Attempt to find an acceptable order of creation for the unresolved entities
ordered_unresolved = solve_resolution_order(sess, unresolved)
# Attempt to recreate the missing entities in the specified order
for ent in ordered_unresolved:
sess.execute(ent.to_sql_statement_create())
# Sanity check that everything is now fine
sanity_check = collect_all_db_entities(sess)
# Fail and rollback if the sanity check is wrong
try:
assert len(before) == len(sanity_check)
except:
sess.rollback()
raise
# Close out the session
sess.commit()
| import logging
from contextlib import contextmanager
from typing import Generator, List
from sqlalchemy import exc as sqla_exc
from sqlalchemy.orm import Session
from alembic_utils.simulate import simulate_entity
logger = logging.getLogger(__name__)
def solve_resolution_order(sess: Session, entities):
"""Solve for an entity resolution order that increases the probability that
    a migration will succeed if, for example, two new views are created and one
refers to the other
This strategy will only solve for simple cases
"""
resolved = []
# Resolve the entities with 0 dependencies first (faster)
logger.info("Resolving entities with no dependencies")
for entity in entities:
try:
with simulate_entity(sess, entity):
resolved.append(entity)
except (sqla_exc.ProgrammingError, sqla_exc.InternalError) as exc:
continue
# Resolve entities with possible dependencies
for _ in range(len(entities)):
logger.info("Resolving entities with dependencies. This may take a minute")
n_resolved = len(resolved)
for entity in entities:
if entity in resolved:
continue
try:
with simulate_entity(sess, entity, dependencies=resolved):
resolved.append(entity)
except (sqla_exc.ProgrammingError, sqla_exc.InternalError):
continue
if len(resolved) == n_resolved:
# No new entities resolved in the last iteration. Exit
break
for entity in entities:
if entity not in resolved:
resolved.append(entity)
return resolved
@contextmanager
def recreate_dropped(connection) -> Generator[Session, None, None]:
    """Recreate any ReplaceableEntities that were dropped within the block
This is useful for making cascading updates. For example, updating a table's column type when it has dependent views.
def upgrade() -> None:
my_view = PGView(...)
with recreate_dropped(op.get_bind()) as conn:
op.drop_entity(my_view)
# change an integer column to a bigint
            op.alter_column(
                table_name="account",
                column_name="id",
                schema="public",
                type_=sa.BIGINT(),
existing_type=sa.Integer(),
)
"""
from alembic_utils.pg_function import PGFunction
from alembic_utils.pg_materialized_view import PGMaterializedView
from alembic_utils.pg_trigger import PGTrigger
from alembic_utils.pg_view import PGView
from alembic_utils.replaceable_entity import ReplaceableEntity
# Do not include permissions here e.g. PGGrantTable. If columns granted to users are dropped, it will cause an error
def collect_all_db_entities(sess: Session) -> List[ReplaceableEntity]:
"""Collect all entities from the database"""
return [
*PGFunction.from_database(sess, "%"),
*PGTrigger.from_database(sess, "%"),
*PGView.from_database(sess, "%"),
*PGMaterializedView.from_database(sess, "%"),
]
sess = Session(bind=connection)
# All existing entities, before the upgrade
before = collect_all_db_entities(sess)
# In the yield, do a
# op.drop_entity(my_mat_view, cascade=True)
# op.create_entity(my_mat_view)
try:
yield sess
except:
sess.rollback()
raise
# All existing entities, after the upgrade
after = collect_all_db_entities(sess)
after_identities = {x.identity for x in after}
# Entities that were not impacted, or that we have "recovered"
resolved = []
unresolved = []
# First, ignore the ones that were not impacted by the upgrade
for ent in before:
if ent.identity in after_identities:
resolved.append(ent)
else:
unresolved.append(ent)
# Attempt to find an acceptable order of creation for the unresolved entities
ordered_unresolved = solve_resolution_order(sess, unresolved)
# Attempt to recreate the missing entities in the specified order
for ent in ordered_unresolved:
sess.execute(ent.to_sql_statement_create())
# Sanity check that everything is now fine
sanity_check = collect_all_db_entities(sess)
# Fail and rollback if the sanity check is wrong
try:
assert len(before) == len(sanity_check)
except:
sess.rollback()
raise
# Close out the session
sess.commit()
| en | 0.86706 | Solve for an entity resolution order that increases the probability that a migration will suceed if, for example, two new views are created and one refers to the other This strategy will only solve for simple cases # Resolve the entities with 0 dependencies first (faster) # Resolve entities with possible dependencies # No new entities resolved in the last iteration. Exit Recreate any dropped all ReplaceableEntities that were dropped within block This is useful for making cascading updates. For example, updating a table's column type when it has dependent views. def upgrade() -> None: my_view = PGView(...) with recreate_dropped(op.get_bind()) as conn: op.drop_entity(my_view) # change an integer column to a bigint op.alter_column( table_name="account", column_name="id", schema="public" type_=sa.BIGINT() existing_type=sa.Integer(), ) # Do not include permissions here e.g. PGGrantTable. If columns granted to users are dropped, it will cause an error Collect all entities from the database # All existing entities, before the upgrade # In the yield, do a # op.drop_entity(my_mat_view, cascade=True) # op.create_entity(my_mat_view) # All existing entities, after the upgrade # Entities that were not impacted, or that we have "recovered" # First, ignore the ones that were not impacted by the upgrade # Attempt to find an acceptable order of creation for the unresolved entities # Attempt to recreate the missing entities in the specified order # Sanity check that everything is now fine # Fail and rollback if the sanity check is wrong # Close out the session | 2.307106 | 2 |
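# --- Editor's illustrative sketch (not part of the corpus row above) ---
# A minimal migration showing how `recreate_dropped` is meant to be used: entities
# dropped inside the block are re-created afterwards in a dependency-aware order via
# `solve_resolution_order`. The view definition and table/column names are invented
# for illustration, and the PGView constructor follows alembic_utils' usual
# (schema, signature, definition) form; treat this as a sketch, not the library docs.
from alembic import op
import sqlalchemy as sa

from alembic_utils.depends import recreate_dropped
from alembic_utils.pg_view import PGView

account_summary = PGView(
    schema="public",
    signature="account_summary",
    definition="SELECT id, name FROM public.account",
)


def upgrade() -> None:
    with recreate_dropped(op.get_bind()):
        # Drop the dependent view so the column type can change underneath it.
        op.drop_entity(account_summary)
        op.alter_column(
            table_name="account",
            column_name="id",
            schema="public",
            type_=sa.BIGINT(),
            existing_type=sa.Integer(),
        )
    # On leaving the block, recreate_dropped re-creates anything that went missing
    # (ordering the re-creations with solve_resolution_order) or rolls back if the
    # post-upgrade sanity check fails.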
gltf2usd/_gltf2usd/gltf2/Animation.py | beta-uy/gltf2usd | 0 | 6632149 | <gh_stars>0
from bisect import bisect_left
from ..gltf2usdUtils import GLTF2USDUtils
from pxr import Gf
class AnimationSampler:
def __init__(self, sampler_entry, animation):
self._animation = animation
self._input_accessor_index = sampler_entry['input']
self._input_accessor = self._animation._gltf_loader.json_data['accessors'][self._input_accessor_index]
self._interpolation = sampler_entry['interpolation'] if ('interpolation' in sampler_entry) else 'LINEAR'
self._output_accessor_index = sampler_entry['output']
self._output_accessor = self._animation._gltf_loader.json_data['accessors'][self._output_accessor_index]
self._input_count = self._input_accessor['count']
self._input_min = self._input_accessor['min']
self._input_max = self._input_accessor['max']
self._output_count = self._output_accessor['count']
self._output_min = self._output_accessor['min'] if ('min' in self._output_accessor) else None
self._output_max = self._output_accessor['max'] if ('max' in self._output_accessor) else None
self._input_data = None
self._output_data = None
self._input_data = None
self._output_data = None
def get_input_count(self):
return self._input_count
def get_input_min(self):
return self._input_min
def get_input_max(self):
return self._input_max
def get_output_count(self):
return self._output_count
def get_output_min(self):
return self._output_min
def get_output_max(self):
return self._output_max
def get_input_data(self):
if not self._input_data:
accessor = self._animation._gltf_loader.json_data['accessors'][self._input_accessor_index]
self._input_data = self._animation._gltf_loader.get_data(accessor)
return self._input_data
def get_output_data(self):
if not self._output_data:
accessor = self._animation._gltf_loader.json_data['accessors'][self._output_accessor_index]
self._output_data = self._animation._gltf_loader.get_data(accessor)
return self._output_data
def get_interpolated_output_data(self, input_sample):
input_data = self.get_input_data()
output_data = self.get_output_data()
closest_pos = bisect_left(input_data, input_sample)
if closest_pos == 0:
value = output_data[0]
if len(value) == 4:
return Gf.Quatf(value[3], value[0], value[1], value[2])
else:
return value
elif closest_pos == len(input_data):
value = output_data[-1]
if len(value) == 4:
return Gf.Quatf(value[3], value[0], value[1], value[2])
else:
return value
else:
left_output_sample = output_data[closest_pos - 1]
right_output_sample = output_data[closest_pos]
factor = float(input_sample - input_data[closest_pos-1])/(input_data[closest_pos] - input_data[closest_pos - 1])
if self._interpolation == 'LINEAR':
return self._linear_interpolate_values(left_output_sample, right_output_sample, factor)
elif self._interpolation == 'STEP':
return self._step_interpolate_values(left_output_sample, right_output_sample, factor)
else:
print('cubic spline interpolation not yet implemented! Defaulting to linear for now...')
return self._linear_interpolate_values(left_output_sample, right_output_sample, factor)
def _linear_interpolate_values(self, value0, value1, factor):
if len(value0) == 3:
one_minus_factor = 1 - factor
#translation or scale interpolation
            return [
                (one_minus_factor * value0[0] + (factor * value1[0])),
                (one_minus_factor * value0[1] + (factor * value1[1])),
                (one_minus_factor * value0[2] + (factor * value1[2]))
]
elif len(value0) == 4:
#quaternion interpolation
result = GLTF2USDUtils.slerp(value0, value1, factor)
return result
else:
raise Exception('unsupported value type')
def _step_interpolate_values(self, value0, value1, factor):
if len(value0) == 3:
#translation or scale interpolation
return value0
elif len(value0) == 4:
#quaternion interpolation
return Gf.Quatf(value0[3], value0[0], value0[1], value0[2])
else:
raise Exception('unsupported value type')
class AnimationChannelTarget:
def __init__(self, animation_channel_target_entry):
self._node_index = animation_channel_target_entry['node']
self._path = animation_channel_target_entry['path']
@property
def path(self):
return self._path
class AnimationChannel:
def __init__(self, channel_entry, animation):
self._sampler_index = channel_entry['sampler']
self._target = AnimationChannelTarget(channel_entry['target'])
self._animation = animation
@property
def target(self):
return self._target
def get_sampler_index(self):
return self._sampler_index
@property
def sampler(self):
return self._animation._samplers[self._sampler_index]
class Animation:
def __init__(self, animation_entry, index, gltf_loader):
self._gltf_loader = gltf_loader
self._name = animation_entry['name'] if ('name' in animation_entry) else 'animation_{}'.format(index)
self._samplers = [AnimationSampler(sampler, self) for sampler in animation_entry['samplers']]
self._channels = [AnimationChannel(channel, self) for channel in animation_entry['channels']]
def get_animation_channel_for_node_and_path(self, node, path):
for channel in self._channels:
if (channel._target._node_index == node.get_index() and channel._target._path == path):
return channel
return None
def get_animation_channels_for_node(self, node):
return [channel for channel in self._channels if (channel._target._node_index == node.get_index())]
def get_channels(self):
return self._channels
def get_samplers(self):
return self._samplers
def get_sampler_at_index(self, index):
return self._samplers[index]
| from bisect import bisect_left
from ..gltf2usdUtils import GLTF2USDUtils
from pxr import Gf
class AnimationSampler:
def __init__(self, sampler_entry, animation):
self._animation = animation
self._input_accessor_index = sampler_entry['input']
self._input_accessor = self._animation._gltf_loader.json_data['accessors'][self._input_accessor_index]
self._interpolation = sampler_entry['interpolation'] if ('interpolation' in sampler_entry) else 'LINEAR'
self._output_accessor_index = sampler_entry['output']
self._output_accessor = self._animation._gltf_loader.json_data['accessors'][self._output_accessor_index]
self._input_count = self._input_accessor['count']
self._input_min = self._input_accessor['min']
self._input_max = self._input_accessor['max']
self._output_count = self._output_accessor['count']
self._output_min = self._output_accessor['min'] if ('min' in self._output_accessor) else None
self._output_max = self._output_accessor['max'] if ('max' in self._output_accessor) else None
self._input_data = None
self._output_data = None
self._input_data = None
self._output_data = None
def get_input_count(self):
return self._input_count
def get_input_min(self):
return self._input_min
def get_input_max(self):
return self._input_max
def get_output_count(self):
return self._output_count
def get_output_min(self):
return self._output_min
def get_output_max(self):
return self._output_max
def get_input_data(self):
if not self._input_data:
accessor = self._animation._gltf_loader.json_data['accessors'][self._input_accessor_index]
self._input_data = self._animation._gltf_loader.get_data(accessor)
return self._input_data
def get_output_data(self):
if not self._output_data:
accessor = self._animation._gltf_loader.json_data['accessors'][self._output_accessor_index]
self._output_data = self._animation._gltf_loader.get_data(accessor)
return self._output_data
def get_interpolated_output_data(self, input_sample):
input_data = self.get_input_data()
output_data = self.get_output_data()
closest_pos = bisect_left(input_data, input_sample)
if closest_pos == 0:
value = output_data[0]
if len(value) == 4:
return Gf.Quatf(value[3], value[0], value[1], value[2])
else:
return value
elif closest_pos == len(input_data):
value = output_data[-1]
if len(value) == 4:
return Gf.Quatf(value[3], value[0], value[1], value[2])
else:
return value
else:
left_output_sample = output_data[closest_pos - 1]
right_output_sample = output_data[closest_pos]
factor = float(input_sample - input_data[closest_pos-1])/(input_data[closest_pos] - input_data[closest_pos - 1])
if self._interpolation == 'LINEAR':
return self._linear_interpolate_values(left_output_sample, right_output_sample, factor)
elif self._interpolation == 'STEP':
return self._step_interpolate_values(left_output_sample, right_output_sample, factor)
else:
print('cubic spline interpolation not yet implemented! Defaulting to linear for now...')
return self._linear_interpolate_values(left_output_sample, right_output_sample, factor)
def _linear_interpolate_values(self, value0, value1, factor):
if len(value0) == 3:
one_minus_factor = 1 - factor
#translation or scale interpolation
            return [
                (one_minus_factor * value0[0] + (factor * value1[0])),
                (one_minus_factor * value0[1] + (factor * value1[1])),
                (one_minus_factor * value0[2] + (factor * value1[2]))
]
elif len(value0) == 4:
#quaternion interpolation
result = GLTF2USDUtils.slerp(value0, value1, factor)
return result
else:
raise Exception('unsupported value type')
def _step_interpolate_values(self, value0, value1, factor):
if len(value0) == 3:
#translation or scale interpolation
return value0
elif len(value0) == 4:
#quaternion interpolation
return Gf.Quatf(value0[3], value0[0], value0[1], value0[2])
else:
raise Exception('unsupported value type')
class AnimationChannelTarget:
def __init__(self, animation_channel_target_entry):
self._node_index = animation_channel_target_entry['node']
self._path = animation_channel_target_entry['path']
@property
def path(self):
return self._path
class AnimationChannel:
def __init__(self, channel_entry, animation):
self._sampler_index = channel_entry['sampler']
self._target = AnimationChannelTarget(channel_entry['target'])
self._animation = animation
@property
def target(self):
return self._target
def get_sampler_index(self):
return self._sampler_index
@property
def sampler(self):
return self._animation._samplers[self._sampler_index]
class Animation:
def __init__(self, animation_entry, index, gltf_loader):
self._gltf_loader = gltf_loader
self._name = animation_entry['name'] if ('name' in animation_entry) else 'animation_{}'.format(index)
self._samplers = [AnimationSampler(sampler, self) for sampler in animation_entry['samplers']]
self._channels = [AnimationChannel(channel, self) for channel in animation_entry['channels']]
def get_animation_channel_for_node_and_path(self, node, path):
for channel in self._channels:
if (channel._target._node_index == node.get_index() and channel._target._path == path):
return channel
return None
def get_animation_channels_for_node(self, node):
return [channel for channel in self._channels if (channel._target._node_index == node.get_index())]
def get_channels(self):
return self._channels
def get_samplers(self):
return self._samplers
def get_sampler_at_index(self, index):
return self._samplers[index] | en | 0.257953 | #translation or scale interpolation #quaternion interpolation #translation or scale interpolation #quaternion interpolation | 2.288265 | 2 |
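# --- Editor's illustrative sketch (not part of the corpus row above) ---
# The core of AnimationSampler.get_interpolated_output_data, distilled into a
# standalone helper: bisect the keyframe times to find the bracketing pair, compute a
# 0..1 factor, then linearly interpolate the 3-component outputs. This is a simplified
# restatement for illustration only (no accessor loading, no quaternion/slerp branch).
from bisect import bisect_left


def sample_linear(times, values, t):
    """times: sorted keyframe times; values: per-keyframe [x, y, z]; t: query time."""
    pos = bisect_left(times, t)
    if pos == 0:
        return values[0]
    if pos == len(times):
        return values[-1]
    factor = float(t - times[pos - 1]) / (times[pos] - times[pos - 1])
    left, right = values[pos - 1], values[pos]
    # Weight the left keyframe by (1 - factor) and the right keyframe by factor.
    return [(1.0 - factor) * l + factor * r for l, r in zip(left, right)]


# e.g. sample_linear([0.0, 1.0, 2.0], [[0, 0, 0], [1, 0, 0], [1, 1, 0]], 0.5)
# -> [0.5, 0.0, 0.0]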
tests/conftest.py | cardano-community/koios-artifacts | 1 | 6632150 | #!/usr/bin/env python3
import pytest
import schemathesis
def pytest_addoption(parser):
parser.addoption(
"--local-url", action="store", default="http://127.0.0.1:8053/api/v0"
)
parser.addoption(
"--compare-url", action="store", default="https://guild.koios.rest/api/v0"
)
parser.addoption(
"--api-schema-file",
action="store",
default="../specs/results/koiosapi-guild.yaml",
)
@pytest.fixture
def local_url(request):
return request.config.getoption("--local-url")
@pytest.fixture
def compare_url(request):
return request.config.getoption("--compare-url")
@pytest.fixture
def api_schema(request):
schema = schemathesis.from_path(request.config.getoption("--api-schema-file"))
return schema
| #!/usr/bin/env python3
import pytest
import schemathesis
def pytest_addoption(parser):
parser.addoption(
"--local-url", action="store", default="http://127.0.0.1:8053/api/v0"
)
parser.addoption(
"--compare-url", action="store", default="https://guild.koios.rest/api/v0"
)
parser.addoption(
"--api-schema-file",
action="store",
default="../specs/results/koiosapi-guild.yaml",
)
@pytest.fixture
def local_url(request):
return request.config.getoption("--local-url")
@pytest.fixture
def compare_url(request):
return request.config.getoption("--compare-url")
@pytest.fixture
def api_schema(request):
schema = schemathesis.from_path(request.config.getoption("--api-schema-file"))
return schema
| fr | 0.221828 | #!/usr/bin/env python3 | 2.017432 | 2 |
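# --- Editor's illustrative sketch (not part of the corpus row above) ---
# One possible test module consuming the fixtures from the conftest.py above: the
# schema loaded via --api-schema-file is turned into property-based tests and each
# generated case is executed against --local-url. The lazy-fixture pattern
# (`schemathesis.from_pytest_fixture`) is an assumption about intended usage and
# depends on the installed schemathesis version.
import schemathesis

schema = schemathesis.from_pytest_fixture("api_schema")


@schema.parametrize()
def test_local_endpoints(case, local_url):
    # Send the generated request to the locally running API and validate the
    # response against the OpenAPI definition.
    response = case.call(base_url=local_url)
    case.validate_response(response)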