filename: stringlengths 4-198 | content: stringlengths 25-939k | environment: list | variablearg: list | constarg: list | variableargjson: stringclasses 1 value | constargjson: stringlengths 2-3.9k | lang: stringclasses 3 values | constargcount: float64 0-129 ⌀ | variableargcount: float64 0-0 ⌀ | sentence: stringclasses 1 value
---|---|---|---|---|---|---|---|---|---|---|
docs/conf.py
|
# Toil documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 25 12:37:16 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
import re
from datetime import datetime
import toil.version
import time
# This makes the modules located in docs/vendor/sphinxcontrib available to import
sphinxPath = os.path.abspath(os.path.join(os.path.pardir, os.path.dirname('docs/vendor/sphinxcontrib/')))
sys.path.append(sphinxPath)
import fulltoc
def fetch_parent_dir(filepath: str, n: int = 1):
'''
Returns a parent directory, n places above the input filepath.
Equivalent to something like: '/home/user/dir'.split('/')[-2] if n=2.
'''
filepath = os.path.realpath(filepath)
for i in range(n):
filepath = os.path.dirname(filepath)
return os.path.basename(filepath)
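# Illustration (hypothetical path, not from the Toil tree):
# fetch_parent_dir('/home/user/project/docs/conf.py', 1) resolves the real path,
# strips 'conf.py', and returns 'docs'; with n=2 it returns 'project'.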
path_to_dir = os.path.dirname(os.path.abspath(__file__))
# Example of toil.version.__file__ on sphinx:
# /home/docs/checkouts/readthedocs.org/user_builds/toil/envs/3.13.0/local/lib/python2.7/site-packages/toil-3.13.0a1-py2.7.egg/toil/version.pyc
envPath = os.path.abspath(toil.version.__file__)
# Example of __file__ on sphinx:
# /home/docs/checkouts/readthedocs.org/user_builds/toil/checkouts/3.13.0/docs/conf.py
wdPath_version = fetch_parent_dir(__file__, 2)
# Expected output: 3.13.0
assert wdPath_version in envPath, "Another Toil installation seems to have precedence over this working directory."
toilVersion = toil.version.baseVersion
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'fulltoc',
]
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),}
# Link definitions available everywhere so we don't need to keep repeating ourselves.
rst_epilog = """
.. _Common Workflow Language: http://www.commonwl.org/
"""
def skip(app, what, name, obj, skip, options):
return name != '__init__' and (skip
or inspect.isclass(obj)
or name.startswith('_') and not inspect.ismodule(obj))
def setup(app):
app.connect('autodoc-skip-member', skip)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames. You can specify multiple suffixes as a list of strings:
# Example: source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Toil'
build_date = datetime.utcfromtimestamp(int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
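# For example (illustrative value), exporting SOURCE_DATE_EPOCH=1546300800 pins
# build_date to 2019-01-01 00:00:00 UTC, which keeps rebuilt docs reproducible.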
copyright = f'2015 – {build_date.year} UCSC Computational Genomics Lab'
author = 'UCSC Computational Genomics Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = re.split('[A-Za-z]', toilVersion)[0]
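# For example, a base version such as '3.13.0a1' is shortened to '3.13.0' here.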
# The full version, including alpha/beta/rc tags.
release = toilVersion
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Include doc string for __init__ method in the documentation
autoclass_content = 'class'
autodoc_member_order = 'bysource'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
html_theme_options = {
"github_banner": True,
"github_user": "BD2KGenomics",
"github_repo": "toil",
"caption_font_size": "24px"
}
# The name of an image file (relative to this directory) to place at the top of the sidebar.
html_logo = "_static/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'Toildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, 'Toil.tex', 'Toil Documentation', author, 'manual')]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'toil', 'Toil Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author, dir menu entry, description, category)
texinfo_documents = [(master_doc, project, 'Toil Documentation', author, project, project, 'Miscellaneous')]
| environment: [] | variablearg: [] | constarg: ["SOURCE_DATE_EPOCH"] | variableargjson: [] | constargjson: ["SOURCE_DATE_EPOCH"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
plugin/lighthouse/util/log.py
|
import os
import sys
import logging
from .misc import makedirs
from .disassembler import disassembler
#------------------------------------------------------------------------------
# Log / Print helpers
#------------------------------------------------------------------------------
def lmsg(message):
"""
Print a message to the disassembler output window, prefixed with [Lighthouse]
"""
# prefix the message
prefix_message = "[Lighthouse] %s" % message
# only print to disassembler if its output window is alive
if disassembler.is_msg_inited():
disassembler.message(prefix_message)
else:
logger.info(message)
def get_log_dir():
"""
Return the Lighthouse log directory.
"""
log_directory = os.path.join(
disassembler.get_disassembler_user_directory(),
"lighthouse_logs"
)
return log_directory
def logging_started():
"""
Check if logging has been started.
"""
return 'logger' in globals()
#------------------------------------------------------------------------------
# Logger Proxy
#------------------------------------------------------------------------------
class LoggerProxy(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, stream, log_level=logging.INFO):
self._logger = logger
self._log_level = log_level
self._stream = stream
def write(self, buf):
for line in buf.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
if self._stream:
self._stream.write(buf)
def flush(self):
pass
def isatty(self):
pass
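# Illustration (not part of the original module): once start_logging() installs a
# LoggerProxy over sys.stdout, a plain print("hello") still reaches the real stdout
# stream and is also mirrored into lighthouse.<pid>.log at INFO level.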
#------------------------------------------------------------------------------
# Initialize Logging
#------------------------------------------------------------------------------
MAX_LOGS = 10
def cleanup_log_directory(log_directory):
"""
Retain only the newest MAX_LOGS log files.
"""
filetimes = {}
# build a map of all the files in the directory, and their last modified time
for log_name in os.listdir(log_directory):
filepath = os.path.join(log_directory, log_name)
if os.path.isfile(filepath):
filetimes[os.path.getmtime(filepath)] = filepath
# get the filetimes and check if there's enough to warrant cleanup
times = list(filetimes.keys())
if len(times) < MAX_LOGS:
return
logger.debug("Cleaning logs directory")
# keep the newest MAX_LOGS logs by dropping them from the deletion list
times.sort(reverse=True)
times = times[MAX_LOGS:]
# loop through the remaining older logs, and delete them
for log_time in times:
try:
os.remove(filetimes[log_time])
except Exception as e:
logger.error("Failed to delete log %s" % filetimes[log_time])
logger.error(e)
def start_logging():
global logger
# create the Lighthouse logger
logger = logging.getLogger("Lighthouse")
#
# only enable logging if the LIGHTHOUSE_LOGGING environment variable is
# present. we simply return a stub logger to sinkhole messages.
#
# NOTE / v0.9.0: logging is enabled by default for now...
#
#if os.getenv("LIGHTHOUSE_LOGGING") == None:
# logger.disabled = True
# return logger
# create a directory for lighthouse logs if it does not exist
log_dir = get_log_dir()
try:
makedirs(log_dir)
except Exception as e:
logger.disabled = True
return logger
# construct the full log path
log_path = os.path.join(log_dir, "lighthouse.%s.log" % os.getpid())
# config the logger
logging.basicConfig(
filename=log_path,
format='%(asctime)s | %(name)28s | %(levelname)7s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S',
level=logging.DEBUG
)
# proxy STDOUT/STDERR to the log files too
stdout_logger = logging.getLogger('Lighthouse.STDOUT')
stderr_logger = logging.getLogger('Lighthouse.STDERR')
sys.stdout = LoggerProxy(stdout_logger, sys.stdout, logging.INFO)
sys.stderr = LoggerProxy(stderr_logger, sys.stderr, logging.ERROR)
# limit the number of logs we keep
cleanup_log_directory(log_dir)
return logger
#------------------------------------------------------------------------------
# Log Helpers
#------------------------------------------------------------------------------
def log_config_warning(self, logger, section, field):
logger.warning("Config missing field '%s' in section '%s'", field, section)
| environment: [] | variablearg: [] | constarg: ["LIGHTHOUSE_LOGGING"] | variableargjson: [] | constargjson: ["LIGHTHOUSE_LOGGING"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
roadscene2vec/data/gen/scenario_runner/srunner/challenge/challenge_statistics_manager.py
|
#!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module contains a statistics manager for the CARLA AD challenge
"""
from __future__ import print_function
import json
import os
import py_trees
from srunner.scenariomanager.traffic_events import TrafficEventType
PENALTY_COLLISION_STATIC = 6
PENALTY_COLLISION_VEHICLE = 6
PENALTY_COLLISION_PEDESTRIAN = 9
PENALTY_TRAFFIC_LIGHT = 3
PENALTY_WRONG_WAY = 2
PENALTY_SIDEWALK_INVASION = 2
PENALTY_STOP = 2
class ChallengeStatisticsManager(object):
"""
This is the statistics manager for the CARLA AD challenge
It gathers data at runtime via the scenario evaluation criteria and
provides the final results as json output.
Note: The class is purely static
"""
system_error = None
error_message = ""
n_routes = 1
statistics_routes = []
current_route_score = 0
current_penalty = 0
list_collisions = []
list_red_lights = []
list_wrong_way = []
list_route_dev = []
list_sidewalk_inv = []
list_stop_inf = []
@staticmethod
def set_number_of_scenarios(number):
"""
Set the total number of scenarios
"""
ChallengeStatisticsManager.n_routes = number
@staticmethod
def next_scenario(scenario):
"""
Update the scenario to the next executed scenario
"""
ChallengeStatisticsManager.scenario = scenario
ChallengeStatisticsManager.error_message = ""
@staticmethod
def record_fatal_error(error_message):
"""
Record the statistics in case of a fatal error (All scores = 0)
"""
result = "ERROR"
score_composed = 0.0
score_penalty = 0.0
score_route = 0.0
ChallengeStatisticsManager.system_error = True
return_message = error_message
return_message += "\n=================================="
current_statistics = {'id': -1,
'score_composed': score_composed,
'score_route': score_route,
'score_penalty': score_penalty,
'result': result,
'help_text': return_message
}
ChallengeStatisticsManager.statistics_routes.append(current_statistics)
@staticmethod
def set_error_message(message):
"""
Set the error message to the provided message
"""
ChallengeStatisticsManager.error_message = message
@staticmethod
def compute_current_statistics():
"""
Compute the current statistics by evaluating all relevant scenario criteria
"""
target_reached = False
score_penalty = 0.0
score_route = 0.0
list_traffic_events = []
ChallengeStatisticsManager.list_collisions = []
ChallengeStatisticsManager.list_red_lights = []
ChallengeStatisticsManager.list_wrong_way = []
ChallengeStatisticsManager.list_route_dev = []
ChallengeStatisticsManager.list_sidewalk_inv = []
ChallengeStatisticsManager.list_stop_inf = []
for node in ChallengeStatisticsManager.scenario.get_criteria():
if node.list_traffic_events:
list_traffic_events.extend(node.list_traffic_events)
# analyze all traffic events
for event in list_traffic_events:
if event.get_type() == TrafficEventType.COLLISION_STATIC:
score_penalty += PENALTY_COLLISION_STATIC
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_collisions.append(event.get_message())
elif event.get_type() == TrafficEventType.COLLISION_VEHICLE:
score_penalty += PENALTY_COLLISION_VEHICLE
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_collisions.append(event.get_message())
elif event.get_type() == TrafficEventType.COLLISION_PEDESTRIAN:
score_penalty += PENALTY_COLLISION_PEDESTRIAN
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_collisions.append(event.get_message())
elif event.get_type() == TrafficEventType.TRAFFIC_LIGHT_INFRACTION:
score_penalty += PENALTY_TRAFFIC_LIGHT
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_red_lights.append(event.get_message())
elif event.get_type() == TrafficEventType.WRONG_WAY_INFRACTION:
score_penalty += PENALTY_WRONG_WAY
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_wrong_way.append(event.get_message())
elif event.get_type() == TrafficEventType.ROUTE_DEVIATION:
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_route_dev.append(event.get_message())
elif event.get_type() == TrafficEventType.ON_SIDEWALK_INFRACTION:
score_penalty += PENALTY_SIDEWALK_INVASION
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_sidewalk_inv.append(event.get_message())
elif event.get_type() == TrafficEventType.STOP_INFRACTION:
score_penalty += PENALTY_STOP
msg = event.get_message()
if msg:
ChallengeStatisticsManager.list_stop_inf.append(event.get_message())
elif event.get_type() == TrafficEventType.ROUTE_COMPLETED:
score_route = 100.0
target_reached = True
elif event.get_type() == TrafficEventType.ROUTE_COMPLETION:
if not target_reached:
if event.get_dict():
score_route = event.get_dict()['route_completed']
else:
score_route = 0
ChallengeStatisticsManager.current_route_score = score_route
ChallengeStatisticsManager.current_penalty = score_penalty
print("Current Score: {}/{}".format(score_route, score_penalty))
@staticmethod
def record_scenario_statistics():
"""
Record the statistics of the current scenario (route)
In case of scenario failure, the last data gathered by compute_current_statistics will be used.
"""
failure = False
result = "SUCCESS"
score_composed = 0.0
return_message = ""
route_id = ChallengeStatisticsManager.scenario.name.split('_')[1]
if ChallengeStatisticsManager.error_message == "":
for criterion in ChallengeStatisticsManager.scenario.get_criteria():
if criterion.status == py_trees.common.Status.FAILURE:
failure = True
result = "FAILURE"
break
if ChallengeStatisticsManager.scenario.timeout and not failure:
result = "TIMEOUT"
ChallengeStatisticsManager.compute_current_statistics()
else:
result = "CRASH"
return_message += "\n=================================="
return_message += "\nCrash message: {}".format(ChallengeStatisticsManager.error_message)
return_message += "\n=================================="
score_composed = max(
ChallengeStatisticsManager.current_route_score - ChallengeStatisticsManager.current_penalty, 0.0)
return_message += "\n=================================="
# pylint: disable=line-too-long
return_message += "\n==[r{}:{}] [Score = {:.2f} : (route_score={}, infractions=-{})]".format(route_id, result,
score_composed,
ChallengeStatisticsManager.current_route_score,
ChallengeStatisticsManager.current_penalty)
# pylint: enable=line-too-long
if ChallengeStatisticsManager.list_collisions:
return_message += "\n===== Collisions:"
for item in ChallengeStatisticsManager.list_collisions:
return_message += "\n========== {}".format(item)
if ChallengeStatisticsManager.list_red_lights:
return_message += "\n===== Red lights:"
for item in ChallengeStatisticsManager.list_red_lights:
return_message += "\n========== {}".format(item)
if ChallengeStatisticsManager.list_stop_inf:
return_message += "\n===== STOP infractions:"
for item in ChallengeStatisticsManager.list_stop_inf:
return_message += "\n========== {}".format(item)
if ChallengeStatisticsManager.list_wrong_way:
return_message += "\n===== Wrong way:"
for item in ChallengeStatisticsManager.list_wrong_way:
return_message += "\n========== {}".format(item)
if ChallengeStatisticsManager.list_sidewalk_inv:
return_message += "\n===== Sidewalk invasions:"
for item in ChallengeStatisticsManager.list_sidewalk_inv:
return_message += "\n========== {}".format(item)
if ChallengeStatisticsManager.list_route_dev:
return_message += "\n===== Route deviation:"
for item in ChallengeStatisticsManager.list_route_dev:
return_message += "\n========== {}".format(item)
return_message += "\n=================================="
current_statistics = {'id': route_id,
'score_composed': score_composed,
'score_route': ChallengeStatisticsManager.current_route_score,
'score_penalty': ChallengeStatisticsManager.current_penalty,
'result': result,
'help_text': return_message
}
ChallengeStatisticsManager.statistics_routes.append(current_statistics)
@staticmethod
def report_challenge_statistics(filename, debug):
"""
Print and save the challenge statistics over all routes
"""
score_composed = 0.0
score_route = 0.0
score_penalty = 0.0
help_message = ""
phase_codename = os.getenv('CHALLENGE_PHASE_CODENAME', 'dev_track_3')
phase = phase_codename.split("_")[0]
if ChallengeStatisticsManager.system_error:
submission_status = 'FAILED'
for stats in ChallengeStatisticsManager.statistics_routes:
help_message += "{}\n\n".format(stats['help_text'])
else:
submission_status = 'FINISHED'
for stats in ChallengeStatisticsManager.statistics_routes:
score_composed += stats['score_composed'] / float(ChallengeStatisticsManager.n_routes)
score_route += stats['score_route'] / float(ChallengeStatisticsManager.n_routes)
score_penalty += stats['score_penalty'] / float(ChallengeStatisticsManager.n_routes)
help_message += "{}\n\n".format(stats['help_text'])
if debug:
print(help_message)
# create json structure
json_data = {
'submission_status': submission_status,
'stderr': help_message if phase == 'dev' or phase == 'debug' else 'No metadata provided for this phase',
'result': [
{
'split': phase,
'accuracies': {
'avg. route points': score_route,
'infraction points': score_penalty,
'total avg.': score_composed
}
}],
'metadata': [
{
'stderr': help_message,
'accuracies': {
'avg. route points': score_route,
'infraction points': score_penalty,
'total avg.': score_composed
}
}
]
}
with open(filename, "w+") as fd:
fd.write(json.dumps(json_data, indent=4))
| environment: [] | variablearg: [] | constarg: ["CHALLENGE_PHASE_CODENAME"] | variableargjson: [] | constargjson: ["CHALLENGE_PHASE_CODENAME"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
mysqld_exporter.go
|
package main
import (
"database/sql"
"flag"
"fmt"
"net/http"
"os"
"path"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/version"
"gopkg.in/ini.v1"
"github.com/prometheus/mysqld_exporter/collector"
)
var (
showVersion = flag.Bool(
"version", false,
"Print version information.",
)
listenAddress = flag.String(
"web.listen-address", ":9104",
"Address to listen on for web interface and telemetry.",
)
metricPath = flag.String(
"web.telemetry-path", "/metrics",
"Path under which to expose metrics.",
)
configMycnf = flag.String(
"config.my-cnf", path.Join(os.Getenv("HOME"), ".my.cnf"),
"Path to .my.cnf file to read MySQL credentials from.",
)
slowLogFilter = flag.Bool(
"log_slow_filter", false,
"Add a log_slow_filter to avoid exessive MySQL slow logging. NOTE: Not supported by Oracle MySQL.",
)
collectProcesslist = flag.Bool(
"collect.info_schema.processlist", false,
"Collect current thread state counts from the information_schema.processlist",
)
collectTableSchema = flag.Bool(
"collect.info_schema.tables", true,
"Collect metrics from information_schema.tables",
)
collectInnodbTablespaces = flag.Bool(
"collect.info_schema.innodb_tablespaces", false,
"Collect metrics from information_schema.innodb_sys_tablespaces",
)
innodbMetrics = flag.Bool(
"collect.info_schema.innodb_metrics", false,
"Collect metrics from information_schema.innodb_metrics",
)
collectGlobalStatus = flag.Bool(
"collect.global_status", true,
"Collect from SHOW GLOBAL STATUS",
)
collectGlobalVariables = flag.Bool(
"collect.global_variables", true,
"Collect from SHOW GLOBAL VARIABLES",
)
collectSlaveStatus = flag.Bool(
"collect.slave_status", true,
"Collect from SHOW SLAVE STATUS",
)
collectAutoIncrementColumns = flag.Bool(
"collect.auto_increment.columns", false,
"Collect auto_increment columns and max values from information_schema",
)
collectBinlogSize = flag.Bool(
"collect.binlog_size", false,
"Collect the current size of all registered binlog files",
)
collectPerfTableIOWaits = flag.Bool(
"collect.perf_schema.tableiowaits", false,
"Collect metrics from performance_schema.table_io_waits_summary_by_table",
)
collectPerfIndexIOWaits = flag.Bool(
"collect.perf_schema.indexiowaits", false,
"Collect metrics from performance_schema.table_io_waits_summary_by_index_usage",
)
collectPerfTableLockWaits = flag.Bool(
"collect.perf_schema.tablelocks", false,
"Collect metrics from performance_schema.table_lock_waits_summary_by_table",
)
collectPerfEventsStatements = flag.Bool(
"collect.perf_schema.eventsstatements", false,
"Collect metrics from performance_schema.events_statements_summary_by_digest",
)
collectPerfEventsWaits = flag.Bool(
"collect.perf_schema.eventswaits", false,
"Collect metrics from performance_schema.events_waits_summary_global_by_event_name",
)
collectPerfFileEvents = flag.Bool(
"collect.perf_schema.file_events", false,
"Collect metrics from performance_schema.file_summary_by_event_name",
)
collectUserStat = flag.Bool("collect.info_schema.userstats", false,
"If running with userstat=1, set to true to collect user statistics",
)
collectClientStat = flag.Bool("collect.info_schema.clientstats", false,
"If running with userstat=1, set to true to collect client statistics",
)
collectTableStat = flag.Bool("collect.info_schema.tablestats", false,
"If running with userstat=1, set to true to collect table statistics",
)
collectQueryResponseTime = flag.Bool("collect.info_schema.query_response_time", false,
"Collect query response time distribution if query_response_time_stats is ON.",
)
collectEngineTokudbStatus = flag.Bool("collect.engine_tokudb_status", false,
"Collect from SHOW ENGINE TOKUDB STATUS",
)
collectEngineInnodbStatus = flag.Bool("collect.engine_innodb_status", false,
"Collect from SHOW ENGINE INNODB STATUS",
)
)
// Metric name parts.
const (
// Namespace for all metrics.
namespace = "mysql"
// Subsystem(s).
exporter = "exporter"
)
// SQL Queries.
const (
sessionSettingsQuery = `SET SESSION log_slow_filter = 'tmp_table_on_disk,filesort_on_disk'`
upQuery = `SELECT 1`
)
// landingPage contains the HTML served at '/'.
// TODO: Make this nicer and more informative.
var landingPage = []byte(`<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`)
// Exporter collects MySQL metrics. It implements prometheus.Collector.
type Exporter struct {
dsn string
duration, error prometheus.Gauge
totalScrapes prometheus.Counter
scrapeErrors *prometheus.CounterVec
mysqldUp prometheus.Gauge
}
// NewExporter returns a new MySQL exporter for the provided DSN.
func NewExporter(dsn string) *Exporter {
return &Exporter{
dsn: dsn,
duration: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_duration_seconds",
Help: "Duration of the last scrape of metrics from MySQL.",
}),
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "scrapes_total",
Help: "Total number of times MySQL was scraped for metrics.",
}),
scrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "scrape_errors_total",
Help: "Total number of times an error occured scraping a MySQL.",
}, []string{"collector"}),
error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
}),
mysqldUp: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the MySQL server is up.",
}),
}
}
// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// We cannot know in advance what metrics the exporter will generate
// from MySQL. So we use the poor man's describe method: Run a collect
// and send the descriptors of all the collected metrics. The problem
// here is that we need to connect to the MySQL DB. If it is currently
// unavailable, the descriptors will be incomplete. Since this is a
// stand-alone exporter and not used as a library within other code
// implementing additional metrics, the worst that can happen is that we
// don't detect inconsistent metrics created by this exporter
// itself. Also, a change in the monitored MySQL instance may change the
// exported metrics during the runtime of the exporter.
metricCh := make(chan prometheus.Metric)
doneCh := make(chan struct{})
go func() {
for m := range metricCh {
ch <- m.Desc()
}
close(doneCh)
}()
e.Collect(metricCh)
close(metricCh)
<-doneCh
}
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(ch)
ch <- e.duration
ch <- e.totalScrapes
ch <- e.error
e.scrapeErrors.Collect(ch)
ch <- e.mysqldUp
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
e.totalScrapes.Inc()
var err error
defer func(begun time.Time) {
e.duration.Set(time.Since(begun).Seconds())
if err == nil {
e.error.Set(0)
} else {
e.error.Set(1)
}
}(time.Now())
db, err := sql.Open("mysql", e.dsn)
if err != nil {
log.Errorln("Error opening connection to database:", err)
return
}
defer db.Close()
isUpRows, err := db.Query(upQuery)
if err != nil {
log.Errorln("Error pinging mysqld:", err)
e.mysqldUp.Set(0)
return
}
isUpRows.Close()
e.mysqldUp.Set(1)
if *slowLogFilter {
sessionSettingsRows, err := db.Query(sessionSettingsQuery)
if err != nil {
log.Errorln("Error setting log_slow_filter:", err)
return
}
sessionSettingsRows.Close()
}
if *collectGlobalStatus {
if err = collector.ScrapeGlobalStatus(db, ch); err != nil {
log.Errorln("Error scraping for collect.global_status:", err)
e.scrapeErrors.WithLabelValues("collect.global_status").Inc()
}
}
if *collectGlobalVariables {
if err = collector.ScrapeGlobalVariables(db, ch); err != nil {
log.Errorln("Error scraping for collect.global_variables:", err)
e.scrapeErrors.WithLabelValues("collect.global_variables").Inc()
}
}
if *collectSlaveStatus {
if err = collector.ScrapeSlaveStatus(db, ch); err != nil {
log.Errorln("Error scraping for collect.slave_status:", err)
e.scrapeErrors.WithLabelValues("collect.slave_status").Inc()
}
}
if *collectProcesslist {
if err = collector.ScrapeProcesslist(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.processlist:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.processlist").Inc()
}
}
if *collectTableSchema {
if err = collector.ScrapeTableSchema(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.tables:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.tables").Inc()
}
}
if *collectInnodbTablespaces {
if err = collector.ScrapeInfoSchemaInnodbTablespaces(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.innodb_sys_tablespaces:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_sys_tablespaces").Inc()
}
}
if *innodbMetrics {
if err = collector.ScrapeInnodbMetrics(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.innodb_metrics:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.innodb_metrics").Inc()
}
}
if *collectAutoIncrementColumns {
if err = collector.ScrapeAutoIncrementColumns(db, ch); err != nil {
log.Errorln("Error scraping for collect.auto_increment.columns:", err)
e.scrapeErrors.WithLabelValues("collect.auto_increment.columns").Inc()
}
}
if *collectBinlogSize {
if err = collector.ScrapeBinlogSize(db, ch); err != nil {
log.Errorln("Error scraping for collect.binlog_size:", err)
e.scrapeErrors.WithLabelValues("collect.binlog_size").Inc()
}
}
if *collectPerfTableIOWaits {
if err = collector.ScrapePerfTableIOWaits(db, ch); err != nil {
log.Errorln("Error scraping for collect.perf_schema.tableiowaits:", err)
e.scrapeErrors.WithLabelValues("collect.perf_schema.tableiowaits").Inc()
}
}
if *collectPerfIndexIOWaits {
if err = collector.ScrapePerfIndexIOWaits(db, ch); err != nil {
log.Errorln("Error scraping for collect.perf_schema.indexiowaits:", err)
e.scrapeErrors.WithLabelValues("collect.perf_schema.indexiowaits").Inc()
}
}
if *collectPerfTableLockWaits {
if err = collector.ScrapePerfTableLockWaits(db, ch); err != nil {
log.Errorln("Error scraping for collect.perf_schema.tablelocks:", err)
e.scrapeErrors.WithLabelValues("collect.perf_schema.tablelocks").Inc()
}
}
if *collectPerfEventsStatements {
if err = collector.ScrapePerfEventsStatements(db, ch); err != nil {
log.Errorln("Error scraping for collect.perf_schema.eventsstatements:", err)
e.scrapeErrors.WithLabelValues("collect.perf_schema.eventsstatements").Inc()
}
}
if *collectPerfEventsWaits {
if err = collector.ScrapePerfEventsWaits(db, ch); err != nil {
log.Errorln("Error scraping for collect.perf_schema.eventswaits:", err)
e.scrapeErrors.WithLabelValues("collect.perf_schema.eventswaits").Inc()
}
}
if *collectPerfFileEvents {
if err = collector.ScrapePerfFileEvents(db, ch); err != nil {
log.Errorln("Error scraping for collect.perf_schema.file_events:", err)
e.scrapeErrors.WithLabelValues("collect.perf_schema.file_events").Inc()
}
}
if *collectUserStat {
if err = collector.ScrapeUserStat(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.userstats:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.userstats").Inc()
}
}
if *collectClientStat {
if err = collector.ScrapeClientStat(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.clientstats:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.clientstats").Inc()
}
}
if *collectTableStat {
if err = collector.ScrapeTableStat(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.tablestats:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.tablestats").Inc()
}
}
if *collectQueryResponseTime {
if err = collector.ScrapeQueryResponseTime(db, ch); err != nil {
log.Errorln("Error scraping for collect.info_schema.query_response_time:", err)
e.scrapeErrors.WithLabelValues("collect.info_schema.query_response_time").Inc()
}
}
if *collectEngineTokudbStatus {
if err = collector.ScrapeEngineTokudbStatus(db, ch); err != nil {
log.Errorln("Error scraping for collect.engine_tokudb_status:", err)
e.scrapeErrors.WithLabelValues("collect.engine_tokudb_status").Inc()
}
}
if *collectEngineInnodbStatus {
if err = collector.ScrapeEngineInnodbStatus(db, ch); err != nil {
log.Errorln("Error scraping for collect.engine_innodb_status:", err)
e.scrapeErrors.WithLabelValues("collect.engine_innodb_status").Inc()
}
}
}
func parseMycnf(config interface{}) (string, error) {
var dsn string
cfg, err := ini.Load(config)
if err != nil {
return dsn, fmt.Errorf("failed reading ini file: %s", err)
}
user := cfg.Section("client").Key("user").String()
password := cfg.Section("client").Key("password").String()
if (user == "") || (password == "") {
return dsn, fmt.Errorf("no user or password specified under [client] in %s", config)
}
host := cfg.Section("client").Key("host").MustString("localhost")
port := cfg.Section("client").Key("port").MustUint(3306)
socket := cfg.Section("client").Key("socket").String()
if socket != "" {
dsn = fmt.Sprintf("%s:%s@unix(%s)/", user, password, socket)
} else {
dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port)
}
log.Debugln(dsn)
return dsn, nil
}
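// Illustrative ~/.my.cnf [client] section that parseMycnf can read (values are
// placeholders, not taken from any real deployment):
//
//   [client]
//   user     = exporter
//   password = secret
//   host     = 127.0.0.1
//   port     = 3306
//
// If a socket key is present, the unix(...) DSN form is built instead of tcp(...).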
func init() {
prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}
func main() {
flag.Parse()
if *showVersion {
fmt.Fprintln(os.Stdout, version.Print("mysqld_exporter"))
os.Exit(0)
}
log.Infoln("Starting mysqld_exporter", version.Info())
log.Infoln("Build context", version.BuildContext())
dsn := os.Getenv("DATA_SOURCE_NAME")
if len(dsn) == 0 {
var err error
if dsn, err = parseMycnf(*configMycnf); err != nil {
log.Fatal(err)
}
}
exporter := NewExporter(dsn)
prometheus.MustRegister(exporter)
http.Handle(*metricPath, prometheus.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write(landingPage)
})
log.Infoln("Listening on", *listenAddress)
log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
| environment: ["\"HOME\"", "\"DATA_SOURCE_NAME\""] | variablearg: [] | constarg: ["DATA_SOURCE_NAME", "HOME"] | variableargjson: [] | constargjson: ["DATA_SOURCE_NAME", "HOME"] | lang: go | constargcount: 2 | variableargcount: 0 | sentence: |
cmd/ovirt-flexvolume-driver/kube-client.go
|
/*
Copyright 2019 oVirt-maintainers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"path/filepath"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/util/file"
)
func getSystemUUIDByNodeName(nodeName string) (string, error) {
nodes, e := getKubeNodes()
if e != nil {
return "", e
}
for _, n := range nodes {
if n.Name == nodeName {
return n.Status.NodeInfo.SystemUUID, nil
}
}
return "", fmt.Errorf("node name %s was not found", nodeName)
}
func getKubeNodes() ([]v1.Node, error) {
kubeconfig, err := locateKubeConfig()
if err != nil {
return nil, err
}
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, err
}
// create the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
return nodes.Items, nil
}
func locateKubeConfig() (string, error) {
defaultKubeConfig := "/etc/origin/master/admin.kubeconfig"
var err = os.ErrNotExist
var ok bool
if ok, err = file.FileOrSymlinkExists(defaultKubeConfig); ok {
return defaultKubeConfig, nil
}
if k := os.Getenv("KUBECONFIG"); k != "" {
if ok, err = file.FileOrSymlinkExists(k); ok {
return k, nil
}
}
if home := homeDir(); home != "" {
kubeconfig := filepath.Join(home, ".kube", "config")
if ok, err = file.FileOrSymlinkExists(kubeconfig); ok {
return kubeconfig, nil
}
}
return "", err
}
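// Search order, for illustration: /etc/origin/master/admin.kubeconfig first, then
// the path in $KUBECONFIG, then $HOME/.kube/config ($USERPROFILE on Windows).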
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
| environment: ["\"KUBECONFIG\"", "\"HOME\"", "\"USERPROFILE\""] | variablearg: [] | constarg: ["HOME", "USERPROFILE", "KUBECONFIG"] | variableargjson: [] | constargjson: ["HOME", "USERPROFILE", "KUBECONFIG"] | lang: go | constargcount: 3 | variableargcount: 0 | sentence: |
client.go
|
package hubspot
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path"
"time"
)
// ClientConfig object used for client creation
type ClientConfig struct {
APIHost string
APIKey string
OAuthToken string
HTTPTimeout time.Duration
DialTimeout time.Duration
TLSTimeout time.Duration
}
// NewClientConfig constructs a ClientConfig object, using environment variables as defaults
func NewClientConfig() ClientConfig {
apiHost := "https://api.hubapi.com"
var apiKey string
var oauthToken string
if os.Getenv("HUBSPOT_API_HOST") != "" {
apiHost = os.Getenv("HUBSPOT_API_HOST")
}
if os.Getenv("HUBSPOT_API_KEY") != "" {
apiKey = os.Getenv("HUBSPOT_API_KEY")
}
if os.Getenv("HUBSPOT_OAUTH_TOKEN") != "" {
oauthToken = os.Getenv("HUBSPOT_OAUTH_TOKEN")
}
return ClientConfig{
APIHost: apiHost,
APIKey: apiKey,
OAuthToken: oauthToken,
HTTPTimeout: 10 * time.Second,
DialTimeout: 5 * time.Second,
TLSTimeout: 5 * time.Second,
}
}
// Client object
type Client struct {
config ClientConfig
}
// NewClient constructor
func NewClient(config ClientConfig) Client {
return Client{
config: config,
}
}
// addAPIKey appends the API key (c.config.APIKey, typically taken from HUBSPOT_API_KEY) to a given URL as the hapikey query parameter.
func (c Client) addAPIKey(u string) (string, error) {
if c.config.APIKey != "" {
uri, err := url.Parse(u)
if err != nil {
return u, err
}
q := uri.Query()
q.Set("hapikey", c.config.APIKey)
uri.RawQuery = q.Encode()
u = uri.String()
}
return u, nil
}
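// For example (illustrative values only), "https://api.hubapi.com/contacts/v1/lists"
// becomes "https://api.hubapi.com/contacts/v1/lists?hapikey=<your-api-key>".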
// Request executes any HubSpot API method using the current client configuration
func (c Client) Request(method, endpoint string, data, response interface{}) error {
// Construct endpoint URL
u, err := url.Parse(c.config.APIHost)
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): url.Parse(): %v", err)
}
u.Path = path.Join(u.Path, endpoint)
// API Key authentication
uri := u.String()
if c.config.APIKey != "" {
uri, err = c.addAPIKey(uri)
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): c.addAPIKey(): %v", err)
}
}
// Init request object
var req *http.Request
// Send data?
if data != nil {
// Encode data to JSON
dataEncoded, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): json.Marshal(): %v", err)
}
buf := bytes.NewBuffer(dataEncoded)
// Create request
req, err = http.NewRequest(method, uri, buf)
} else {
// Create no-data request
req, err = http.NewRequest(method, uri, nil)
}
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): http.NewRequest(): %v", err)
}
// OAuth authentication
if c.config.APIKey == "" && c.config.OAuthToken != "" {
req.Header.Add("Authorization", "Bearer "+c.config.OAuthToken)
}
// Headers
req.Header.Add("Content-Type", "application/json")
// Execute and read response body
netClient := &http.Client{
Timeout: c.config.HTTPTimeout,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: c.config.DialTimeout,
}).Dial,
TLSHandshakeTimeout: c.config.TLSTimeout,
},
}
resp, err := netClient.Do(req)
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): c.config.HTTPClient.Do(): %v", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): ioutil.ReadAll(): %v", err)
}
// Get data?
if response != nil {
err = json.Unmarshal(body, &response)
if err != nil {
return fmt.Errorf("hubspot.Client.Request(): json.Unmarshal(): %v \n%s", err, string(body))
}
}
// Return HTTP errors
if resp.StatusCode != 200 && resp.StatusCode != 204 {
return fmt.Errorf("HubSpot API error: %d - %s \n%s", resp.StatusCode, resp.Status, string(body))
}
// Done!
return nil
}
| environment: ["\"HUBSPOT_API_HOST\"", "\"HUBSPOT_API_HOST\"", "\"HUBSPOT_API_KEY\"", "\"HUBSPOT_API_KEY\"", "\"HUBSPOT_OAUTH_TOKEN\"", "\"HUBSPOT_OAUTH_TOKEN\""] | variablearg: [] | constarg: ["HUBSPOT_API_KEY", "HUBSPOT_API_HOST", "HUBSPOT_OAUTH_TOKEN"] | variableargjson: [] | constargjson: ["HUBSPOT_API_KEY", "HUBSPOT_API_HOST", "HUBSPOT_OAUTH_TOKEN"] | lang: go | constargcount: 3 | variableargcount: 0 | sentence: |
examples/buildapp/cmd/firstserver/firstserver.go
|
package main
import (
"github.com/xiaodulala/component-tools/examples/buildapp/internal/firstserver"
"math/rand"
"os"
"runtime"
"time"
)
func main() {
rand.Seed(time.Now().UTC().UnixNano())
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
firstserver.NewApp("first-server").Run()
}
| environment: ["\"GOMAXPROCS\""] | variablearg: [] | constarg: ["GOMAXPROCS"] | variableargjson: [] | constargjson: ["GOMAXPROCS"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
examples/efficientnet.py
|
# load weights from
# https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth
# a rough copy of
# https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py
import os
GPU = os.getenv("GPU", None) is not None
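# For example, invoking this script as `GPU=1 python examples/efficientnet.py <image>`
# (illustrative command line) enables the Tensor.cuda() path below; leaving GPU unset
# keeps inference on the CPU.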
import sys
import io
import time
import numpy as np
np.set_printoptions(suppress=True)
from tinygrad.tensor import Tensor
from tinygrad.utils import fetch, get_parameters
from extra.efficientnet import EfficientNet
def infer(model, img):
# preprocess image
aspect_ratio = img.size[0] / img.size[1]
img = img.resize((int(224*max(aspect_ratio,1.0)), int(224*max(1.0/aspect_ratio,1.0))))
img = np.array(img)
y0,x0=(np.asarray(img.shape)[:2]-224)//2
retimg = img = img[y0:y0+224, x0:x0+224]
# if you want to look at the image
"""
import matplotlib.pyplot as plt
plt.imshow(img)
plt.show()
"""
# low level preprocess
img = np.moveaxis(img, [2,0,1], [0,1,2])
img = img.astype(np.float32)[:3].reshape(1,3,224,224)
img /= 255.0
img -= np.array([0.485, 0.456, 0.406]).reshape((1,-1,1,1))
img /= np.array([0.229, 0.224, 0.225]).reshape((1,-1,1,1))
# run the net
if GPU:
out = model.forward(Tensor(img).cuda()).cpu()
else:
out = model.forward(Tensor(img))
# if you want to look at the outputs
"""
import matplotlib.pyplot as plt
plt.plot(out.data[0])
plt.show()
"""
return out, retimg
if __name__ == "__main__":
# instantiate my net
model = EfficientNet(int(os.getenv("NUM", "0")))
model.load_weights_from_torch()
if GPU:
[x.cuda_() for x in get_parameters(model)]
# category labels
import ast
lbls = fetch("https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt")
lbls = ast.literal_eval(lbls.decode('utf-8'))
# load image and preprocess
from PIL import Image
url = sys.argv[1]
if url == 'webcam':
import cv2
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
while 1:
_ = cap.grab() # discard one frame to circumvent capture buffering
ret, frame = cap.read()
img = Image.fromarray(frame[:, :, [2,1,0]])
out, retimg = infer(model, img)
print(np.argmax(out.data), np.max(out.data), lbls[np.argmax(out.data)])
SCALE = 3
simg = cv2.resize(retimg, (224*SCALE, 224*SCALE))
retimg = cv2.cvtColor(simg, cv2.COLOR_RGB2BGR)
cv2.imshow('capture', retimg)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
else:
if url.startswith('http'):
img = Image.open(io.BytesIO(fetch(url)))
else:
img = Image.open(url)
st = time.time()
out, _ = infer(model, img)
print(np.argmax(out.data), np.max(out.data), lbls[np.argmax(out.data)])
print("did inference in %.2f s" % (time.time()-st))
#print("NOT", np.argmin(out.data), np.min(out.data), lbls[np.argmin(out.data)])
| environment: [] | variablearg: [] | constarg: ["GPU", "NUM"] | variableargjson: [] | constargjson: ["GPU", "NUM"] | lang: python | constargcount: 2 | variableargcount: 0 | sentence: |
metrics/amqsconn.go
|
/*
This is a short sample to show how to connect to a remote
queue manager in a Go program without requiring external
client configuration such as a CCDT. Only the basic
parameters are needed here - channel name and connection information -
along with the queue manager name.
For example, run as
amqsconn QMGR1 "SYSTEM.DEF.SVRCONN" "myhost.example.com(1414)"
If the MQSAMP_USER_ID environment variable is set, then a userid/password
flow is also made to authenticate to the queue manager.
There is no attempt in this sample to configure advanced security features
such as TLS.
If an error occurs, the error is reported.
*/
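// Illustrative invocation (placeholder values, not from the original sample):
//
//   export MQSAMP_USER_ID=app
//   export MQSAMP_PASSWORD=passw0rd
//   ./amqsconn QM1 "SYSTEM.DEF.SVRCONN" "myhost.example.com(1414)"
//
// With both variables set, the interactive password prompt below is skipped.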
package main
/*
Copyright (c) IBM Corporation 2017, 2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Mark Taylor - Initial Contribution
*/
import (
"bufio"
"fmt"
"os"
"time"
"github.com/ibm-messaging/mq-golang/ibmmq"
)
func main() {
var qMgrName string
var err error
var qMgr ibmmq.MQQueueManager
var rc int
if len(os.Args) != 4 {
fmt.Println("amqsconn <qmgrname> <channelname> <conname>")
fmt.Println("")
fmt.Println("For example")
fmt.Println(" amqsconn QMGR1 \"SYSTEM.DEF.SVRCONN\" \"myhost.example.com(1414)\"")
fmt.Println("All parameters are required.")
os.Exit(1)
}
// Which queue manager do we want to connect to
qMgrName = os.Args[1]
// Allocate the MQCNO and MQCD structures needed for the CONNX call.
cno := ibmmq.NewMQCNO()
cd := ibmmq.NewMQCD()
// Fill in required fields in the MQCD channel definition structure
cd.ChannelName = os.Args[2]
cd.ConnectionName = os.Args[3]
// Reference the CD structure from the CNO and indicate that we definitely want to
// use the client connection method.
cno.ClientConn = cd
cno.Options = ibmmq.MQCNO_CLIENT_BINDING
// Also fill in the userid and password if the MQSAMP_USER_ID
// environment variable is set. This is the same variable used by the C
// sample programs such as amqsput shipped with the MQ product.
userId := os.Getenv("MQSAMP_USER_ID")
if userId != "" {
scanner := bufio.NewScanner(os.Stdin)
csp := ibmmq.NewMQCSP()
csp.AuthenticationType = ibmmq.MQCSP_AUTH_USER_ID_AND_PWD
csp.UserId = userId
passWord := os.Getenv("MQSAMP_PASSWORD")
if passWord == "" {
fmt.Printf("Enter password for qmgr %s: \n", qMgrName)
// For simplicity (it doesn't help with understanding the MQ parts of this program)
// don't try to do anything special like turning off console echo for the password input
scanner.Scan()
csp.Password = scanner.Text()
} else {
csp.Password = passWord
}
// Make the CNO refer to the CSP structure so it gets used during the connection
cno.SecurityParms = csp
}
// And now we can try to connect. Wait a short time before disconnecting.
qMgr, err = ibmmq.Connx(qMgrName, cno)
if err == nil {
fmt.Printf("Connection to %s succeeded.\n", qMgrName)
d, _ := time.ParseDuration("5s")
time.Sleep(d)
qMgr.Disc() // Ignore errors from disconnect as we can't do much about it anyway
rc = 0
} else {
fmt.Printf("Connection to %s failed.\n", qMgrName)
fmt.Println(err)
rc = int(err.(*ibmmq.MQReturn).MQCC)
}
fmt.Println("Done.")
os.Exit(rc)
}
| environment: ["\"MQSAMP_USER_ID\"", "\"MQSAMP_PASSWORD\""] | variablearg: [] | constarg: ["MQSAMP_PASSWORD", "MQSAMP_USER_ID"] | variableargjson: [] | constargjson: ["MQSAMP_PASSWORD", "MQSAMP_USER_ID"] | lang: go | constargcount: 2 | variableargcount: 0 | sentence: |
samples/discovery_profile_create.py
|
#!/usr/bin/env python
#
# Exercise the opsramp module as an illustration of how to use it.
#
# (c) Copyright 2019-2021 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import opsramp.binding
import opsramp.integrations
subscription = os.environ['AZURE_SUBSCRIPTION']
azure_tenant = os.environ['AZURE_TENANT_ID']
azure_client = os.environ['AZURE_CLIENT_ID']
azure_secret = os.environ['AZURE_SECRET_KEY']
def connect():
url = os.environ['OPSRAMP_URL']
key = os.environ['OPSRAMP_KEY']
secret = os.environ['OPSRAMP_SECRET']
return opsramp.binding.connect(url, key, secret)
def parse_argv():
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--debug',
action='store_true'
)
ns = parser.parse_args()
return ns
def main():
ns = parse_argv()
if ns.debug:
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
tenant_id = os.environ['OPSRAMP_TENANT_ID']
ormp = connect()
tnt = ormp.tenant(tenant_id)
discovery = tnt.discovery()
# Create new discovery profile...
creds = opsramp.Integrations.mkAzureARM(
arm_subscription_id=subscription,
arm_tenant_id=azure_tenant,
arm_client_id=azure_client,
arm_secret_key=azure_secret
)
jdata = [{
'name': 'Azure ' + subscription,
'credential': creds,
'policy': {
'name': 'whatever',
'resourceType': 'ALL',
'entityType': 'ALL',
'rules': [{
'filterType': 'ANY_CLOUD_RESOURCE'
}],
'actions': [{
'action': 'MANAGE DEVICE',
'items': [],
'forceAssignOrUnassign': False
}],
'matchType': 'ANY'
},
'schedule': {
'patternType': 'MINUTES',
'pattern': '30',
'startTime': '00:30:00'
}
}]
resp = discovery.create(jdata)
print(resp)
if __name__ == "__main__":
main()
| environment: [] | variablearg: [] | constarg: ["OPSRAMP_TENANT_ID", "AZURE_SECRET_KEY", "AZURE_CLIENT_ID", "OPSRAMP_SECRET", "AZURE_TENANT_ID", "AZURE_SUBSCRIPTION", "OPSRAMP_KEY", "OPSRAMP_URL"] | variableargjson: [] | constargjson: ["OPSRAMP_TENANT_ID", "AZURE_SECRET_KEY", "AZURE_CLIENT_ID", "OPSRAMP_SECRET", "AZURE_TENANT_ID", "AZURE_SUBSCRIPTION", "OPSRAMP_KEY", "OPSRAMP_URL"] | lang: python | constargcount: 8 | variableargcount: 0 | sentence: |
main_normal.go
|
// +build !linux !amd64
package main
import (
"fmt"
"net/http"
"os"
)
func main() {
if isPrintVersion {
printVersion()
return
}
checkEnv("GITLAB_API_ENDPOINT")
checkEnv("GITLAB_BASE_URL")
checkEnv("GITLAB_PRIVATE_TOKEN")
checkEnv("SLACK_OAUTH_ACCESS_TOKEN")
port := os.Getenv("PORT")
if port == "" {
port = "8000"
}
fmt.Printf("gitpanda started: port=%s\n", port)
http.HandleFunc("/", normalHandler)
http.ListenAndServe(":"+port, nil)
}
| environment: ["\"PORT\""] | variablearg: [] | constarg: ["PORT"] | variableargjson: [] | constargjson: ["PORT"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
Bottle.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet; eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
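# Illustrative sketch, not part of bottle itself: how cached_property behaves.
# The Dataset class and its attribute names below are made up for this example.
def _example_cached_property():
    class Dataset(object):
        def __init__(self, numbers):
            self.numbers = numbers
        @cached_property
        def total(self):
            # Computed on first access, then stored on the instance so later
            # accesses skip this method entirely.
            return sum(self.numbers)
    data = Dataset([1, 2, 3])
    first = data.total      # computes and caches 6
    second = data.total     # served from data.__dict__, no recomputation
    del data.total          # deleting the attribute resets the cache
    return first, second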
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
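# Illustrative sketch, not part of bottle itself: _re_flatten() rewrites
# capturing groups into non-capturing ones so that several route patterns can
# later be OR-ed into one big regex (see Router._compile below) without
# shifting group indices. The pattern here is made up for this example.
def _example_re_flatten():
    pattern = r'/user/(?P<id>\d+)\.(jpg|png)'
    flat = _re_flatten(pattern)   # '/user/(?:\d+)\.(?:jpg|png)'
    return re.match('^%s$' % flat, '/user/42.png') is not None   # True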
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
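# Illustrative sketch, not part of bottle itself: the Router can be used on its
# own with a minimal WSGI-style environ. The rule and target below are made up.
def _example_router():
    router = Router()
    router.add('/hello/<name>', 'GET', target='hello-handler')
    environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/hello/world'}
    target, url_args = router.match(environ)
    return target, url_args   # ('hello-handler', {'name': 'world'})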
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turing an URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
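# Illustrative sketch, not part of bottle itself: a tiny application wired up
# from the pieces above. All route paths, hook and handler names are made up
# for this example; nothing below runs at import time.
def _example_application():
    app = Bottle()
    @app.hook('before_request')
    def _ignore_trailing_slash():
        # Runs after the request is bound but before routing happens.
        request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/') or '/'
    @app.route('/hello/<name>')
    def hello(name):
        return 'Hello %s' % name
    @app.error(404)
    def not_found(err):
        # err is the HTTPError instance describing the failure.
        return 'Nothing here: %s' % err.status_line
    # The app object itself is the WSGI callable: app(environ, start_response).
    return app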
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
they should not be confused with the "URL wildcards" provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
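# Illustrative sketch, not part of bottle itself: wrapping a hand-built WSGI
# environ in a BaseRequest to inspect it. The environ values are made up.
def _example_base_request():
    environ = {
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/hello',
        'QUERY_STRING': 'name=world&lang=en',
        'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest',
    }
    req = BaseRequest(environ)
    # ('GET', '/hello', 'world', True)
    return req.method, req.path, req.query.get('name'), req.is_xhr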
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
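# Illustrative sketch, not part of bottle itself: HeaderProperty exposes one
# header of an object that has a dict-like ``headers`` attribute as a plain
# (optionally typed) attribute, as BaseResponse does below for Content-Type
# and Content-Length. The Message class is made up for this example.
def _example_header_property():
    class Message(object):
        content_length = HeaderProperty('Content-Length', reader=int, default=0)
        def __init__(self):
            self.headers = {}
    msg = Message()
    msg.content_length = 1024    # stored as headers['Content-Length'] = '1024'
    return msg.content_length    # read back through reader=int -> 1024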
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
out = [
(k, v.encode('utf8').decode('latin1')
if isinstance(v, unicode) else v) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
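# Illustrative sketch, not part of bottle itself: building a response by hand
# and inspecting the resulting WSGI header list. Header and cookie values are
# made up for this example.
def _example_base_response():
    resp = BaseResponse(body='created', status=201)
    resp.set_header('X-Example', 'yes')              # hypothetical header name
    resp.set_cookie('session', 'abc123', path='/')
    headers = dict(resp.headerlist)                  # includes Content-Type and Set-Cookie
    return resp.status_code, resp.status_line, headers['X-Example']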
def _local_property():
ls = threading.local()
def fget(_):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value): ls.var = value
def fdel(_): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
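# Illustrative sketch, not part of bottle itself: callbacks usually signal
# failures by raising HTTPError; Bottle._handle() above catches HTTPResponse
# (and therefore HTTPError) and turns it into a regular response. The lookup
# below is made up for this example.
def _example_abort_on_missing(item_id, known_items=('demo',)):
    if item_id not in known_items:
        raise HTTPError(404, 'No such item: %r' % item_id)
    return 'Found %s' % item_id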
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
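# Illustrative sketch, not part of bottle itself: a minimal plugin with the
# same shape as JSONPlugin above. It wraps every route callback and reports
# the execution time in a made-up response header. Install it with
# ``app.install(_ExampleTimerPlugin())``.
class _ExampleTimerPlugin(object):
    name = 'example_timer'
    api = 2
    def apply(self, callback, route):
        def wrapper(*a, **ka):
            start = time.time()
            rv = callback(*a, **ka)
            response.set_header('X-Exec-Time', '%.6f' % (time.time() - start))
            return rv
        return wrapper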
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
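# Illustrative sketch, not part of bottle itself: MultiDict keeps every value
# for a key but looks like a plain dict for simple item access. The key and
# values below are made up for this example.
def _example_multidict():
    md = MultiDict(tag='a')
    md.append('tag', 'b')      # keep the old value, add another one
    md['tag'] = 'c'            # item assignment appends as well
    # ('c', ['a', 'b', 'c'], 'a')
    return md['tag'], md.getall('tag'), md.get('tag', index=0)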
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
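# Illustrative sketch of FormsDict attribute access under Python 3 WSGI
# semantics, where raw values arrive latin1-decoded (the field name is hypothetical):
#   form = FormsDict(name='Bj\xc3\xb6rn')   # raw value as received from WSGI
#   form['name']    # raw native string: 'Bj\xc3\xb6rn'
#   form.name       # recoded to utf8 text: 'Björn'; missing attributes -> ''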
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replacing the old value instead of appending to it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
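# Illustrative sketch: HeaderDict keys are case-insensitive and plain item
# assignment replaces instead of appending (header values are hypothetical):
#   h = HeaderDict()
#   h['Content-Type'] = 'text/html'
#   h['content-type']           # -> 'text/html'
#   h.append('Set-Cookie', 'a=1'); h.append('Set-Cookie', 'b=2')
#   h.getall('set-cookie')      # -> ['a=1', 'b=2']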
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
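# Illustrative sketch of namespaced configuration (keys and values are hypothetical):
#   c = ConfigDict()
#   c.update('myapp.db', host='localhost', port=5432)
#   c['myapp.db.host']      # -> 'localhost'
#   c.load_dict({'myapp': {'debug': True}})
#   c['myapp.debug']        # -> True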
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
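# Illustrative sketch (directory and file names are hypothetical): register a
# search path relative to the current module, then resolve and open a resource.
#   rm = ResourceManager()
#   rm.add_path('./data/', base=__file__)
#   full = rm.lookup('defaults.json')       # absolute path or None
#   if full:
#       with rm.open('defaults.json') as fp:
#           payload = fp.read()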
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
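# Illustrative sketch of handling an upload inside a route handler (the form
# field name 'upload' and the target directory are hypothetical):
#   @route('/upload', method='POST')
#   def do_upload():
#       f = request.files.get('upload')     # a FileUpload instance or None
#       if not f:
#           return HTTPError(400, 'No file uploaded.')
#       f.save('/tmp/incoming')             # directory: f.filename is appended
#       return 'Stored %s' % f.filename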
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes an HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
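# Illustrative sketch: serve files from a (hypothetical) ./static directory and
# force a download dialog for a second route.
#   @route('/static/<filepath:path>')
#   def serve_static(filepath):
#       return static_file(filepath, root='./static')
#   @route('/download/<filename>')
#   def download(filename):
#       return static_file(filename, root='./static', download=filename)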
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
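# Illustrative sketch (credentials are hypothetical):
#   header = 'Basic ' + touni(base64.b64encode(b'alice:secret'))
#   parse_auth(header)        # -> ('alice', 'secret')
#   parse_auth('Bearer xyz')  # -> None (only Basic auth is understood)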
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
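# Illustrative sketch for a 1000-byte resource (the header value is hypothetical);
# note that the yielded end index is non-inclusive:
#   list(parse_range_header('bytes=0-499,500-,-100', 1000))
#   # -> [(0, 500), (500, 1000), (900, 1000)]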
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like an encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
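# Illustrative round trip with a hypothetical secret key:
#   token = cookie_encode({'user': 'alice'}, 's3cret')
#   cookie_is_encoded(token)            # -> True
#   cookie_decode(token, 's3cret')      # -> {'user': 'alice'}
#   cookie_decode(token, 'wrong-key')   # -> None (signature mismatch)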
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n','&#10;')\
.replace('\r','&#13;').replace('\t','&#9;')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
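# Illustrative sketch (paths are hypothetical):
#   path_shift('/app', '/admin/users', shift=1)     # -> ('/app/admin', '/users')
#   path_shift('/app/admin', '/users', shift=-1)    # -> ('/app', '/admin/users')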
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
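# Illustrative sketch: protect a route with a (hypothetical) credential check.
#   def check_credentials(user, password):
#       return user == 'admin' and password == 'secret'
#   @route('/admin')
#   @auth_basic(check_credentials, realm='admin area')
#   def admin_page():
#       return 'Welcome, admin.'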
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls, handler_cls)
self.port = self.srv.server_port # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
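# Illustrative sketch: run() (defined below) resolves an adapter either by one
# of the names above or from a ServerAdapter subclass; both calls are equivalent
# (host and port values are hypothetical):
#   run(app, server='wsgiref', host='127.0.0.1', port=8080)
#   run(app, server=WSGIRefServer, host='127.0.0.1', port=8080)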
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.', True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
offs = self.offset
self.offset += m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep)
self.offset += len(line+sep)
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if code_line and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
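# Illustrative sketch: render a (hypothetical) 'hello.tpl' either implicitly via
# the view() decorator or explicitly via template():
#   @route('/hello/<name>')
#   @view('hello')                  # looks up hello.tpl on TEMPLATE_PATH
#   def hello(name):
#       return dict(name=name)      # dict entries become template variables
#   # equivalent without the decorator:
#   #   return template('hello', name=name)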
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
[] |
[] |
[
"BOTTLE_LOCKFILE",
"BOTTLE_CHILD"
] |
[]
|
["BOTTLE_LOCKFILE", "BOTTLE_CHILD"]
|
python
| 2 | 0 | |
pdfminer/cmapdb.py
|
""" Adobe character mapping (CMap) support.
CMaps provide the mapping from character codes to character IDs (CIDs)
and from CIDs to Unicode code points.
More information is available on the Adobe website:
http://opensource.adobe.com/wiki/display/cmap/CMap+Resources
"""
import sys
import os
import os.path
import gzip
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
import struct
import logging
from .psparser import PSStackParser
from .psparser import PSSyntaxError
from .psparser import PSEOF
from .psparser import PSLiteral
from .psparser import literal_name
from .psparser import KWD
from .encodingdb import name2unicode
from .utils import choplist
from .utils import nunpack
import six #Python 2+3 compatibility
class CMapError(Exception):
pass
## CMapBase
##
class CMapBase(object):
debug = 0
def __init__(self, **kwargs):
self.attrs = kwargs.copy()
return
def is_vertical(self):
return self.attrs.get('WMode', 0) != 0
def set_attr(self, k, v):
self.attrs[k] = v
return
def add_code2cid(self, code, cid):
return
def add_cid2unichr(self, cid, code):
return
def use_cmap(self, cmap):
return
## CMap
##
class CMap(CMapBase):
def __init__(self, **kwargs):
CMapBase.__init__(self, **kwargs)
self.code2cid = {}
return
def __repr__(self):
return '<CMap: %s>' % self.attrs.get('CMapName')
def use_cmap(self, cmap):
assert isinstance(cmap, CMap), str(type(cmap))
def copy(dst, src):
for (k, v) in six.iteritems(src):
if isinstance(v, dict):
d = {}
dst[k] = d
copy(d, v)
else:
dst[k] = v
copy(self.code2cid, cmap.code2cid)
return
def decode(self, code):
d = self.code2cid
for i in six.iterbytes(code):
if i in d:
d = d[i]
if isinstance(d, int):
yield d
d = self.code2cid
else:
d = self.code2cid
return
def dump(self, out=sys.stdout, code2cid=None, code=None):
if code2cid is None:
code2cid = self.code2cid
code = ()
        for (k, v) in sorted(six.iteritems(code2cid)):
c = code+(k,)
if isinstance(v, int):
out.write('code %r = cid %d\n' % (c, v))
else:
self.dump(out=out, code2cid=v, code=c)
return
## IdentityCMap
##
class IdentityCMap(CMapBase):
def decode(self, code):
n = len(code)//2
if n:
return struct.unpack('>%dH' % n, code)
else:
return ()
## UnicodeMap
##
class UnicodeMap(CMapBase):
def __init__(self, **kwargs):
CMapBase.__init__(self, **kwargs)
self.cid2unichr = {}
return
def __repr__(self):
return '<UnicodeMap: %s>' % self.attrs.get('CMapName')
def get_unichr(self, cid):
return self.cid2unichr[cid]
def dump(self, out=sys.stdout):
        for (k, v) in sorted(six.iteritems(self.cid2unichr)):
out.write('cid %d = unicode %r\n' % (k, v))
return
## FileCMap
##
class FileCMap(CMap):
def add_code2cid(self, code, cid):
assert isinstance(code, str) and isinstance(cid, int), str((type(code), type(cid)))
d = self.code2cid
for c in code[:-1]:
c = ord(c)
if c in d:
d = d[c]
else:
t = {}
d[c] = t
d = t
c = ord(code[-1])
d[c] = cid
return
## FileUnicodeMap
##
class FileUnicodeMap(UnicodeMap):
def add_cid2unichr(self, cid, code):
assert isinstance(cid, int), str(type(cid))
if isinstance(code, PSLiteral):
# Interpret as an Adobe glyph name.
self.cid2unichr[cid] = name2unicode(code.name)
elif isinstance(code, bytes):
# Interpret as UTF-16BE.
self.cid2unichr[cid] = code.decode('UTF-16BE', 'ignore')
elif isinstance(code, int):
self.cid2unichr[cid] = six.unichr(code)
else:
raise TypeError(code)
return
## PyCMap
##
class PyCMap(CMap):
def __init__(self, name, module):
CMap.__init__(self, CMapName=name)
self.code2cid = module.CODE2CID
if module.IS_VERTICAL:
self.attrs['WMode'] = 1
return
## PyUnicodeMap
##
class PyUnicodeMap(UnicodeMap):
def __init__(self, name, module, vertical):
UnicodeMap.__init__(self, CMapName=name)
if vertical:
self.cid2unichr = module.CID2UNICHR_V
self.attrs['WMode'] = 1
else:
self.cid2unichr = module.CID2UNICHR_H
return
## CMapDB
##
class CMapDB(object):
_cmap_cache = {}
_umap_cache = {}
class CMapNotFound(CMapError):
pass
@classmethod
def _load_data(klass, name):
name = name.replace("\0", "")
filename = '%s.pickle.gz' % name
cmap_paths = (os.environ.get('CMAP_PATH', '/usr/share/pdfminer/'),
os.path.join(os.path.dirname(__file__), 'cmap'),)
for directory in cmap_paths:
path = os.path.join(directory, filename)
if os.path.exists(path):
gzfile = gzip.open(path)
try:
return type(str(name), (), pickle.loads(gzfile.read()))
finally:
gzfile.close()
else:
raise CMapDB.CMapNotFound(name)
@classmethod
def get_cmap(klass, name):
if name == 'Identity-H':
return IdentityCMap(WMode=0)
elif name == 'Identity-V':
return IdentityCMap(WMode=1)
try:
return klass._cmap_cache[name]
except KeyError:
pass
data = klass._load_data(name)
klass._cmap_cache[name] = cmap = PyCMap(name, data)
return cmap
@classmethod
def get_unicode_map(klass, name, vertical=False):
try:
return klass._umap_cache[name][vertical]
except KeyError:
pass
data = klass._load_data('to-unicode-%s' % name)
klass._umap_cache[name] = umaps = [PyUnicodeMap(name, data, v) for v in (False, True)]
return umaps[vertical]
## CMapParser
##
class CMapParser(PSStackParser):
def __init__(self, cmap, fp):
PSStackParser.__init__(self, fp)
self.cmap = cmap
# some ToUnicode maps don't have "begincmap" keyword.
self._in_cmap = True
return
def run(self):
try:
self.nextobject()
except PSEOF:
pass
return
KEYWORD_BEGINCMAP = KWD(b'begincmap')
KEYWORD_ENDCMAP = KWD(b'endcmap')
KEYWORD_USECMAP = KWD(b'usecmap')
KEYWORD_DEF = KWD(b'def')
KEYWORD_BEGINCODESPACERANGE = KWD(b'begincodespacerange')
KEYWORD_ENDCODESPACERANGE = KWD(b'endcodespacerange')
KEYWORD_BEGINCIDRANGE = KWD(b'begincidrange')
KEYWORD_ENDCIDRANGE = KWD(b'endcidrange')
KEYWORD_BEGINCIDCHAR = KWD(b'begincidchar')
KEYWORD_ENDCIDCHAR = KWD(b'endcidchar')
KEYWORD_BEGINBFRANGE = KWD(b'beginbfrange')
KEYWORD_ENDBFRANGE = KWD(b'endbfrange')
KEYWORD_BEGINBFCHAR = KWD(b'beginbfchar')
KEYWORD_ENDBFCHAR = KWD(b'endbfchar')
KEYWORD_BEGINNOTDEFRANGE = KWD(b'beginnotdefrange')
KEYWORD_ENDNOTDEFRANGE = KWD(b'endnotdefrange')
def do_keyword(self, pos, token):
if token is self.KEYWORD_BEGINCMAP:
self._in_cmap = True
self.popall()
return
elif token is self.KEYWORD_ENDCMAP:
self._in_cmap = False
return
if not self._in_cmap:
return
#
if token is self.KEYWORD_DEF:
try:
((_, k), (_, v)) = self.pop(2)
self.cmap.set_attr(literal_name(k), v)
except PSSyntaxError:
pass
return
if token is self.KEYWORD_USECMAP:
try:
((_, cmapname),) = self.pop(1)
self.cmap.use_cmap(CMapDB.get_cmap(literal_name(cmapname)))
except PSSyntaxError:
pass
except CMapDB.CMapNotFound:
pass
return
if token is self.KEYWORD_BEGINCODESPACERANGE:
self.popall()
return
if token is self.KEYWORD_ENDCODESPACERANGE:
self.popall()
return
if token is self.KEYWORD_BEGINCIDRANGE:
self.popall()
return
if token is self.KEYWORD_ENDCIDRANGE:
objs = [obj for (__, obj) in self.popall()]
for (s, e, cid) in choplist(3, objs):
if (not isinstance(s, str) or not isinstance(e, str) or
not isinstance(cid, int) or len(s) != len(e)):
continue
sprefix = s[:-4]
eprefix = e[:-4]
if sprefix != eprefix:
continue
svar = s[-4:]
evar = e[-4:]
s1 = nunpack(svar)
e1 = nunpack(evar)
vlen = len(svar)
#assert s1 <= e1, str((s1, e1))
for i in range(e1-s1+1):
x = sprefix+struct.pack('>L', s1+i)[-vlen:]
self.cmap.add_code2cid(x, cid+i)
return
if token is self.KEYWORD_BEGINCIDCHAR:
self.popall()
return
if token is self.KEYWORD_ENDCIDCHAR:
objs = [obj for (__, obj) in self.popall()]
for (cid, code) in choplist(2, objs):
if isinstance(code, str) and isinstance(cid, str):
self.cmap.add_code2cid(code, nunpack(cid))
return
if token is self.KEYWORD_BEGINBFRANGE:
self.popall()
return
if token is self.KEYWORD_ENDBFRANGE:
objs = [obj for (__, obj) in self.popall()]
for (s, e, code) in choplist(3, objs):
if (not isinstance(s, bytes) or not isinstance(e, bytes) or
len(s) != len(e)):
continue
s1 = nunpack(s)
e1 = nunpack(e)
#assert s1 <= e1, str((s1, e1))
if isinstance(code, list):
for i in range(e1-s1+1):
self.cmap.add_cid2unichr(s1+i, code[i])
else:
var = code[-4:]
base = nunpack(var)
prefix = code[:-4]
vlen = len(var)
for i in range(e1-s1+1):
x = prefix+struct.pack('>L', base+i)[-vlen:]
self.cmap.add_cid2unichr(s1+i, x)
return
if token is self.KEYWORD_BEGINBFCHAR:
self.popall()
return
if token is self.KEYWORD_ENDBFCHAR:
objs = [obj for (__, obj) in self.popall()]
for (cid, code) in choplist(2, objs):
if isinstance(cid, bytes) and isinstance(code, bytes):
self.cmap.add_cid2unichr(nunpack(cid), code)
return
if token is self.KEYWORD_BEGINNOTDEFRANGE:
self.popall()
return
if token is self.KEYWORD_ENDNOTDEFRANGE:
self.popall()
return
self.push((pos, token))
return
# test
def main(argv):
args = argv[1:]
for fname in args:
        fp = open(fname, 'rb')
cmap = FileUnicodeMap()
#cmap = FileCMap()
CMapParser(cmap, fp).run()
fp.close()
cmap.dump()
return
if __name__ == '__main__':
sys.exit(main(sys.argv))
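# Illustrative usage sketch (editorial addition, not part of pdfminer):
# 'Identity-H' is resolved without touching the pickled CMap resources, so it
# makes a convenient smoke test for the decode path.
#
#     cmap = CMapDB.get_cmap('Identity-H')   # IdentityCMap, horizontal writing mode
#     cids = cmap.decode(b'\x00A\x00B')      # -> (65, 66)
#     assert not cmap.is_vertical()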
|
[] |
[] |
[
"CMAP_PATH"
] |
[]
|
["CMAP_PATH"]
|
python
| 1 | 0 | |
Python38/Lib/pathlib.py
|
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
from _collections_abc import Sequence
from errno import EINVAL, ENOENT, ENOTDIR, EBADF, ELOOP
from operator import attrgetter
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
from urllib.parse import quote_from_bytes as urlquote_from_bytes
supports_symlinks = True
if os.name == 'nt':
import nt
if sys.getwindowsversion()[:2] >= (6, 0):
from nt import _getfinalpathname
else:
supports_symlinks = False
_getfinalpathname = None
else:
nt = None
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
# EBADF - guard against macOS `stat` throwing EBADF
_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
_IGNORED_WINERRORS = (
21, # ERROR_NOT_READY - drive exists but is not accessible
1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
)
def _ignore_error(exception):
    return (getattr(exception, 'errno', None) in _IGNORED_ERRORS or
            getattr(exception, 'winerror', None) in _IGNORED_WINERRORS)
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
"""A flavour implements a particular (platform-specific) set of path
semantics."""
def __init__(self):
self.join = self.sep.join
def parse_parts(self, parts):
parsed = []
sep = self.sep
altsep = self.altsep
drv = root = ''
it = reversed(parts)
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv, root, rel = self.splitroot(part)
if sep in rel:
for x in reversed(rel.split(sep)):
if x and x != '.':
parsed.append(sys.intern(x))
else:
if rel and rel != '.':
parsed.append(sys.intern(rel))
if drv or root:
if not drv:
# If no drive is present, try to find one in the previous
# parts. This makes the result of parsing e.g.
# ("C:", "/", "a") reasonably intuitive.
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv = self.splitroot(part)[0]
if drv:
break
break
if drv or root:
parsed.append(drv + root)
parsed.reverse()
return drv, root, parsed
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
"""
Join the two paths represented by the respective
(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
"""
if root2:
if not drv2 and drv:
return drv, root2, [drv + root2] + parts2[1:]
elif drv2:
if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
# Same drive => second path is relative to the first
return drv, root, parts + parts2[1:]
else:
# Second path is non-anchored (common case)
return drv, root, parts + parts2
return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
# Reference for Windows paths can be found at
# http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
sep = '\\'
altsep = '/'
has_drv = True
pathmod = ntpath
is_supported = (os.name == 'nt')
drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
ext_namespace_prefix = '\\\\?\\'
reserved_names = (
{'CON', 'PRN', 'AUX', 'NUL'} |
{'COM%d' % i for i in range(1, 10)} |
{'LPT%d' % i for i in range(1, 10)}
)
# Interesting findings about extended paths:
# - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
# but '\\?\c:/a' is not
# - extended paths are always absolute; "relative" extended paths will
# fail.
def splitroot(self, part, sep=sep):
first = part[0:1]
second = part[1:2]
if (second == sep and first == sep):
# XXX extended paths should also disable the collapsing of "."
# components (according to MSDN docs).
prefix, part = self._split_extended_path(part)
first = part[0:1]
second = part[1:2]
else:
prefix = ''
third = part[2:3]
if (second == sep and first == sep and third != sep):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvvv root
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^
index = part.find(sep, 2)
if index != -1:
index2 = part.find(sep, index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 != index + 1:
if index2 == -1:
index2 = len(part)
if prefix:
return prefix + part[1:index2], sep, part[index2+1:]
else:
return part[:index2], sep, part[index2+1:]
drv = root = ''
if second == ':' and first in self.drive_letters:
drv = part[:2]
part = part[2:]
first = third
if first == sep:
root = first
part = part.lstrip(sep)
return prefix + drv, root, part
def casefold(self, s):
return s.lower()
def casefold_parts(self, parts):
return [p.lower() for p in parts]
def compile_pattern(self, pattern):
return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch
def resolve(self, path, strict=False):
s = str(path)
if not s:
return os.getcwd()
previous_s = None
if _getfinalpathname is not None:
if strict:
return self._ext_to_normal(_getfinalpathname(s))
else:
tail_parts = [] # End of the path after the first one not found
while True:
try:
s = self._ext_to_normal(_getfinalpathname(s))
except FileNotFoundError:
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s:
return path
else:
return os.path.join(s, *reversed(tail_parts))
# Means fallback on absolute
return None
def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
prefix = ''
if s.startswith(ext_prefix):
prefix = s[:4]
s = s[4:]
if s.startswith('UNC\\'):
prefix += s[:3]
s = '\\' + s[3:]
return prefix, s
def _ext_to_normal(self, s):
# Turn back an extended path into a normal DOS-like path
return self._split_extended_path(s)[1]
def is_reserved(self, parts):
# NOTE: the rules for reserved names seem somewhat complicated
# (e.g. r"..\NUL" is reserved but not r"foo\NUL").
# We err on the side of caution and return True for paths which are
# not considered reserved by Windows.
if not parts:
return False
if parts[0].startswith('\\\\'):
# UNC paths are never reserved
return False
return parts[-1].partition('.')[0].upper() in self.reserved_names
def make_uri(self, path):
# Under Windows, file URIs use the UTF-8 encoding.
drive = path.drive
if len(drive) == 2 and drive[1] == ':':
# It's a path on a local drive => 'file:///c:/a/b'
rest = path.as_posix()[2:].lstrip('/')
return 'file:///%s/%s' % (
drive, urlquote_from_bytes(rest.encode('utf-8')))
else:
# It's a path on a network drive => 'file://host/share/a/b'
return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
def gethomedir(self, username):
if 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif 'HOMEPATH' in os.environ:
try:
drv = os.environ['HOMEDRIVE']
except KeyError:
drv = ''
userhome = drv + os.environ['HOMEPATH']
else:
raise RuntimeError("Can't determine home directory")
if username:
# Try to guess user home directory. By default all users
# directories are located in the same place and are named by
# corresponding usernames. If current user home directory points
# to nonstandard place, this guess is likely wrong.
if os.environ['USERNAME'] != username:
drv, root, parts = self.parse_parts((userhome,))
if parts[-1] != os.environ['USERNAME']:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
parts[-1] = username
if drv or root:
userhome = drv + root + self.join(parts[1:])
else:
userhome = self.join(parts)
return userhome
class _PosixFlavour(_Flavour):
sep = '/'
altsep = ''
has_drv = False
pathmod = posixpath
is_supported = (os.name != 'nt')
def splitroot(self, part, sep=sep):
if part and part[0] == sep:
stripped_part = part.lstrip(sep)
# According to POSIX path resolution:
# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
# "A pathname that begins with two successive slashes may be
# interpreted in an implementation-defined manner, although more
# than two leading slashes shall be treated as a single slash".
if len(part) - len(stripped_part) == 2:
return '', sep * 2, stripped_part
else:
return '', sep, stripped_part
else:
return '', '', part
def casefold(self, s):
return s
def casefold_parts(self, parts):
return parts
def compile_pattern(self, pattern):
return re.compile(fnmatch.translate(pattern)).fullmatch
def resolve(self, path, strict=False):
sep = self.sep
accessor = path._accessor
seen = {}
def _resolve(path, rest):
if rest.startswith(sep):
path = ''
for name in rest.split(sep):
if not name or name == '.':
# current dir
continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError("Symlink loop from %r" % newpath)
# Resolve the symbolic link
try:
target = accessor.readlink(newpath)
except OSError as e:
if e.errno != EINVAL and strict:
raise
# Not a symlink, or non-strict mode. We just leave the path
# untouched.
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
return _resolve(base, str(path)) or sep
def is_reserved(self, parts):
return False
def make_uri(self, path):
# We represent the path using the local filesystem encoding,
# for portability to other applications.
bpath = bytes(path)
return 'file://' + urlquote_from_bytes(bpath)
def gethomedir(self, username):
if not username:
try:
return os.environ['HOME']
except KeyError:
import pwd
return pwd.getpwuid(os.getuid()).pw_dir
else:
import pwd
try:
return pwd.getpwnam(username).pw_dir
except KeyError:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
"""An accessor implements a particular (system-specific or not) way of
accessing paths on the filesystem."""
class _NormalAccessor(_Accessor):
stat = os.stat
lstat = os.lstat
open = os.open
listdir = os.listdir
scandir = os.scandir
chmod = os.chmod
if hasattr(os, "lchmod"):
lchmod = os.lchmod
else:
def lchmod(self, pathobj, mode):
raise NotImplementedError("lchmod() not available on this system")
mkdir = os.mkdir
unlink = os.unlink
if hasattr(os, "link"):
link_to = os.link
else:
@staticmethod
def link_to(self, target):
raise NotImplementedError("os.link() not available on this system")
rmdir = os.rmdir
rename = os.rename
replace = os.replace
if nt:
if supports_symlinks:
symlink = os.symlink
else:
def symlink(a, b, target_is_directory):
raise NotImplementedError("symlink() not available on this system")
else:
# Under POSIX, os.symlink() takes two args
@staticmethod
def symlink(a, b, target_is_directory):
return os.symlink(a, b)
utime = os.utime
# Helper for resolve()
def readlink(self, path):
return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
def _make_selector(pattern_parts, flavour):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts, flavour)
if hasattr(functools, "lru_cache"):
_make_selector = functools.lru_cache()(_make_selector)
class _Selector:
"""A selector matches a specific glob pattern part against the children
of a given path."""
def __init__(self, child_parts, flavour):
self.child_parts = child_parts
if child_parts:
self.successor = _make_selector(child_parts, flavour)
self.dironly = True
else:
self.successor = _TerminatingSelector()
self.dironly = False
def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
scandir = parent_path._accessor.scandir
if not is_dir(parent_path):
return iter([])
return self._select_from(parent_path, is_dir, exists, scandir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, scandir):
yield parent_path
class _PreciseSelector(_Selector):
def __init__(self, name, child_parts, flavour):
self.name = name
_Selector.__init__(self, child_parts, flavour)
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
path = parent_path._make_child_relpath(self.name)
if (is_dir if self.dironly else exists)(path):
for p in self.successor._select_from(path, is_dir, exists, scandir):
yield p
except PermissionError:
return
class _WildcardSelector(_Selector):
def __init__(self, pat, child_parts, flavour):
self.match = flavour.compile_pattern(pat)
_Selector.__init__(self, child_parts, flavour)
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
with scandir(parent_path) as scandir_it:
entries = list(scandir_it)
for entry in entries:
if self.dironly:
try:
# "entry.is_dir()" can raise PermissionError
# in some cases (see bpo-38894), which is not
# among the errors ignored by _ignore_error()
if not entry.is_dir():
continue
except OSError as e:
if not _ignore_error(e):
raise
continue
name = entry.name
if self.match(name):
path = parent_path._make_child_relpath(name)
for p in self.successor._select_from(path, is_dir, exists, scandir):
yield p
except PermissionError:
return
class _RecursiveWildcardSelector(_Selector):
def __init__(self, pat, child_parts, flavour):
_Selector.__init__(self, child_parts, flavour)
def _iterate_directories(self, parent_path, is_dir, scandir):
yield parent_path
try:
with scandir(parent_path) as scandir_it:
entries = list(scandir_it)
for entry in entries:
entry_is_dir = False
try:
entry_is_dir = entry.is_dir()
except OSError as e:
if not _ignore_error(e):
raise
if entry_is_dir and not entry.is_symlink():
path = parent_path._make_child_relpath(entry.name)
for p in self._iterate_directories(path, is_dir, scandir):
yield p
except PermissionError:
return
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
yielded = set()
try:
successor_select = self.successor._select_from
for starting_point in self._iterate_directories(parent_path, is_dir, scandir):
for p in successor_select(starting_point, is_dir, exists, scandir):
if p not in yielded:
yield p
yielded.add(p)
finally:
yielded.clear()
except PermissionError:
return
#
# Public API
#
class _PathParents(Sequence):
"""This object provides sequence-like access to the logical ancestors
of a path. Don't try to construct it yourself."""
__slots__ = ('_pathcls', '_drv', '_root', '_parts')
def __init__(self, path):
# We don't store the instance to avoid reference cycles
self._pathcls = type(path)
self._drv = path._drv
self._root = path._root
self._parts = path._parts
def __len__(self):
if self._drv or self._root:
return len(self._parts) - 1
else:
return len(self._parts)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError(idx)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
def __repr__(self):
return "<{}.parents>".format(self._pathcls.__name__)
class PurePath(object):
"""Base class for manipulating paths without I/O.
PurePath represents a filesystem path and offers operations which
don't imply any actual filesystem I/O. Depending on your system,
instantiating a PurePath will return either a PurePosixPath or a
PureWindowsPath object. You can also instantiate either of these classes
directly, regardless of your system.
"""
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
def __new__(cls, *args):
"""Construct a PurePath from one or several strings and or existing
PurePath objects. The strings and path objects are combined so as
to yield a canonicalized path, which is incorporated into the
new PurePath object.
"""
if cls is PurePath:
cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
return cls._from_parts(args)
def __reduce__(self):
# Using the parts tuple helps share interned path parts
# when pickling related paths.
return (self.__class__, tuple(self._parts))
@classmethod
def _parse_args(cls, args):
# This is useful when you don't want to create an instance, just
# canonicalize some constructor arguments.
parts = []
for a in args:
if isinstance(a, PurePath):
parts += a._parts
else:
a = os.fspath(a)
if isinstance(a, str):
# Force-cast str subclasses to str (issue #21127)
parts.append(str(a))
else:
raise TypeError(
"argument should be a str object or an os.PathLike "
"object returning str, not %r"
% type(a))
return cls._flavour.parse_parts(parts)
@classmethod
def _from_parts(cls, args, init=True):
# We need to call _parse_args on the instance, so as to get the
# right flavour.
self = object.__new__(cls)
drv, root, parts = self._parse_args(args)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _from_parsed_parts(cls, drv, root, parts, init=True):
self = object.__new__(cls)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _format_parsed_parts(cls, drv, root, parts):
if drv or root:
return drv + root + cls._flavour.join(parts[1:])
else:
return cls._flavour.join(parts)
def _init(self):
# Overridden in concrete Path
pass
def _make_child(self, args):
drv, root, parts = self._parse_args(args)
drv, root, parts = self._flavour.join_parsed_parts(
self._drv, self._root, self._parts, drv, root, parts)
return self._from_parsed_parts(drv, root, parts)
def __str__(self):
"""Return the string representation of the path, suitable for
passing to system calls."""
try:
return self._str
except AttributeError:
self._str = self._format_parsed_parts(self._drv, self._root,
self._parts) or '.'
return self._str
def __fspath__(self):
return str(self)
def as_posix(self):
"""Return the string representation of the path with forward (/)
slashes."""
f = self._flavour
return str(self).replace(f.sep, '/')
def __bytes__(self):
"""Return the bytes representation of the path. This is only
recommended to use under Unix."""
return os.fsencode(self)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, self.as_posix())
def as_uri(self):
"""Return the path as a 'file' URI."""
if not self.is_absolute():
raise ValueError("relative path can't be expressed as a file URI")
return self._flavour.make_uri(self)
@property
def _cparts(self):
# Cached casefolded parts, for hashing and comparison
try:
return self._cached_cparts
except AttributeError:
self._cached_cparts = self._flavour.casefold_parts(self._parts)
return self._cached_cparts
def __eq__(self, other):
if not isinstance(other, PurePath):
return NotImplemented
return self._cparts == other._cparts and self._flavour is other._flavour
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(tuple(self._cparts))
return self._hash
def __lt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts < other._cparts
def __le__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts <= other._cparts
def __gt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts > other._cparts
def __ge__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts >= other._cparts
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
@property
def anchor(self):
"""The concatenation of the drive and root, or ''."""
anchor = self._drv + self._root
return anchor
@property
def name(self):
"""The final path component, if any."""
parts = self._parts
if len(parts) == (1 if (self._drv or self._root) else 0):
return ''
return parts[-1]
@property
def suffix(self):
"""
The final component's last suffix, if any.
This includes the leading period. For example: '.txt'
"""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[i:]
else:
return ''
@property
def suffixes(self):
"""
A list of the final component's suffixes, if any.
These include the leading periods. For example: ['.tar', '.gz']
"""
name = self.name
if name.endswith('.'):
return []
name = name.lstrip('.')
return ['.' + suffix for suffix in name.split('.')[1:]]
@property
def stem(self):
"""The final path component, minus its last suffix."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[:i]
else:
return name
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
drv, root, parts = self._flavour.parse_parts((name,))
if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
or drv or root or len(parts) != 1):
raise ValueError("Invalid name %r" % (name))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed. If the path
has no suffix, add given suffix. If the given suffix is an empty
string, remove the suffix from the path.
"""
f = self._flavour
if f.sep in suffix or f.altsep and f.altsep in suffix:
raise ValueError("Invalid suffix %r" % (suffix,))
if suffix and not suffix.startswith('.') or suffix == '.':
raise ValueError("Invalid suffix %r" % (suffix))
name = self.name
if not name:
raise ValueError("%r has an empty name" % (self,))
old_suffix = self.suffix
if not old_suffix:
name = name + suffix
else:
name = name[:-len(old_suffix)] + suffix
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
# Path('c:/').relative_to('/') raise ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
@property
def parts(self):
"""An object providing sequence-like access to the
components in the filesystem path."""
# We cache the tuple to avoid building a new one each time .parts
# is accessed. XXX is this necessary?
try:
return self._pparts
except AttributeError:
self._pparts = tuple(self._parts)
return self._pparts
def joinpath(self, *args):
"""Combine this path with one or several arguments, and return a
new path representing either a subpath (if all arguments are relative
paths) or a totally different path (if one of the arguments is
anchored).
"""
return self._make_child(args)
def __truediv__(self, key):
try:
return self._make_child((key,))
except TypeError:
return NotImplemented
def __rtruediv__(self, key):
try:
return self._from_parts([key] + self._parts)
except TypeError:
return NotImplemented
@property
def parent(self):
"""The logical parent of the path."""
drv = self._drv
root = self._root
parts = self._parts
if len(parts) == 1 and (drv or root):
return self
return self._from_parsed_parts(drv, root, parts[:-1])
@property
def parents(self):
"""A sequence of this path's logical parents."""
return _PathParents(self)
def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv)
def is_reserved(self):
"""Return True if the path contains one of the special names reserved
by the system, if any."""
return self._flavour.is_reserved(self._parts)
def match(self, path_pattern):
"""
Return True if this path matches the given pattern.
"""
cf = self._flavour.casefold
path_pattern = cf(path_pattern)
drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
if not pat_parts:
raise ValueError("empty pattern")
if drv and drv != cf(self._drv):
return False
if root and root != cf(self._root):
return False
parts = self._cparts
if drv or root:
if len(pat_parts) != len(parts):
return False
pat_parts = pat_parts[1:]
elif len(pat_parts) > len(parts):
return False
for part, pat in zip(reversed(parts), reversed(pat_parts)):
if not fnmatch.fnmatchcase(part, pat):
return False
return True
# Can't subclass os.PathLike from PurePath and keep the constructor
# optimizations in PurePath._parse_args().
os.PathLike.register(PurePath)
class PurePosixPath(PurePath):
"""PurePath subclass for non-Windows systems.
On a POSIX system, instantiating a PurePath should return this object.
However, you can also instantiate it directly on any system.
"""
_flavour = _posix_flavour
__slots__ = ()
class PureWindowsPath(PurePath):
"""PurePath subclass for Windows systems.
On a Windows system, instantiating a PurePath should return this object.
However, you can also instantiate it directly on any system.
"""
_flavour = _windows_flavour
__slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
"""PurePath subclass that can make system calls.
Path represents a filesystem path but unlike PurePath, also offers
methods to do system calls on path objects. Depending on your system,
instantiating a Path will return either a PosixPath or a WindowsPath
object. You can also instantiate a PosixPath or WindowsPath directly,
but cannot instantiate a WindowsPath on a POSIX system or vice versa.
"""
__slots__ = (
'_accessor',
'_closed',
)
def __new__(cls, *args, **kwargs):
if cls is Path:
cls = WindowsPath if os.name == 'nt' else PosixPath
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError("cannot instantiate %r on your system"
% (cls.__name__,))
self._init()
return self
def _init(self,
# Private non-constructor arguments
template=None,
):
self._closed = False
if template is not None:
self._accessor = template._accessor
else:
self._accessor = _normal_accessor
def _make_child_relpath(self, part):
# This is an optimization used for dir walking. `part` must be
# a single part relative to this path.
parts = self._parts + [part]
return self._from_parsed_parts(self._drv, self._root, parts)
def __enter__(self):
if self._closed:
self._raise_closed()
return self
def __exit__(self, t, v, tb):
self._closed = True
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _opener(self, name, flags, mode=0o666):
# A stub for the opener argument to built-in open()
return self._accessor.open(self, flags, mode)
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
if self._closed:
self._raise_closed()
return self._accessor.open(self, flags, mode)
# Public API
@classmethod
def cwd(cls):
"""Return a new path pointing to the current working directory
(as returned by os.getcwd()).
"""
return cls(os.getcwd())
@classmethod
def home(cls):
"""Return a new path pointing to the user's home directory (as
returned by os.path.expanduser('~')).
"""
return cls(cls()._flavour.gethomedir(None))
def samefile(self, other_path):
"""Return whether other_path is the same or not as this file
(as returned by os.path.samefile()).
"""
st = self.stat()
try:
other_st = other_path.stat()
except AttributeError:
other_st = os.stat(other_path)
return os.path.samestat(st, other_st)
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
if self._closed:
self._raise_closed()
for name in self._accessor.listdir(self):
if name in {'.', '..'}:
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
if self._closed:
self._raise_closed()
def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given relative pattern.
"""
if not pattern:
raise ValueError("Unacceptable pattern: {!r}".format(pattern))
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts), self._flavour)
for p in selector.select_from(self):
yield p
def rglob(self, pattern):
"""Recursively yield all existing files (of any kind, including
directories) matching the given relative pattern, anywhere in
this subtree.
"""
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(("**",) + tuple(pattern_parts), self._flavour)
for p in selector.select_from(self):
yield p
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
"""
# XXX untested yet!
if self._closed:
self._raise_closed()
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj
def resolve(self, strict=False):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
if self._closed:
self._raise_closed()
s = self._flavour.resolve(self, strict=strict)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path doesn't exist or is forbidden
self.stat()
s = str(self.absolute())
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def stat(self):
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return self._accessor.stat(self)
def owner(self):
"""
Return the login name of the file owner.
"""
import pwd
return pwd.getpwuid(self.stat().st_uid).pw_name
def group(self):
"""
Return the group name of the file gid.
"""
import grp
return grp.getgrgid(self.stat().st_gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
if self._closed:
self._raise_closed()
return io.open(self, mode, buffering, encoding, errors, newline,
opener=self._opener)
def read_bytes(self):
"""
Open the file in bytes mode, read it, and close the file.
"""
with self.open(mode='rb') as f:
return f.read()
def read_text(self, encoding=None, errors=None):
"""
Open the file in text mode, read it, and close the file.
"""
with self.open(mode='r', encoding=encoding, errors=errors) as f:
return f.read()
def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
# type-check for the buffer interface before truncating the file
view = memoryview(data)
with self.open(mode='wb') as f:
return f.write(view)
def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str):
raise TypeError('data must be str, not %s' %
data.__class__.__name__)
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data)
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if self._closed:
self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
self._accessor.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = self._raw_open(flags, mode)
os.close(fd)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
"""
Create a new directory at this given path.
"""
if self._closed:
self._raise_closed()
try:
self._accessor.mkdir(self, mode)
except FileNotFoundError:
if not parents or self.parent == self:
raise
self.parent.mkdir(parents=True, exist_ok=True)
self.mkdir(mode, parents=False, exist_ok=exist_ok)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not self.is_dir():
raise
def chmod(self, mode):
"""
Change the permissions of the path, like os.chmod().
"""
if self._closed:
self._raise_closed()
self._accessor.chmod(self, mode)
def lchmod(self, mode):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode)
def unlink(self, missing_ok=False):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
if self._closed:
self._raise_closed()
try:
self._accessor.unlink(self)
except FileNotFoundError:
if not missing_ok:
raise
def rmdir(self):
"""
Remove this directory. The directory must be empty.
"""
if self._closed:
self._raise_closed()
self._accessor.rmdir(self)
def lstat(self):
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
if self._closed:
self._raise_closed()
return self._accessor.lstat(self)
def link_to(self, target):
"""
Create a hard link pointing to a path named target.
"""
if self._closed:
self._raise_closed()
self._accessor.link_to(self, target)
def rename(self, target):
"""
Rename this path to the given path,
and return a new Path instance pointing to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target)
return self.__class__(target)
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists, and return a new Path instance
pointing to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.replace(self, target)
return self.__class__(target)
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
if self._closed:
self._raise_closed()
self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if not _ignore_error(e):
raise
return False
except ValueError:
# Non-encodable path
return False
return True
def is_dir(self):
"""
Whether this path is a directory.
"""
try:
return S_ISDIR(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_file(self):
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
try:
return S_ISREG(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_mount(self):
"""
Check if this path is a POSIX mount point
"""
# Need to exist and be a dir
if not self.exists() or not self.is_dir():
return False
parent = Path(self.parent)
try:
parent_dev = parent.stat().st_dev
except OSError:
return False
dev = self.stat().st_dev
if dev != parent_dev:
return True
ino = self.stat().st_ino
parent_ino = parent.stat().st_ino
return ino == parent_ino
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist
return False
except ValueError:
# Non-encodable path
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_socket(self):
"""
Whether this path is a socket.
"""
try:
return S_ISSOCK(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def expanduser(self):
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not (self._drv or self._root) and
self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
class PosixPath(Path, PurePosixPath):
"""Path subclass for non-Windows systems.
On a POSIX system, instantiating a Path should return this object.
"""
__slots__ = ()
class WindowsPath(Path, PureWindowsPath):
"""Path subclass for Windows systems.
On a Windows system, instantiating a Path should return this object.
"""
__slots__ = ()
def owner(self):
raise NotImplementedError("Path.owner() is unsupported on this system")
def group(self):
raise NotImplementedError("Path.group() is unsupported on this system")
def is_mount(self):
raise NotImplementedError("Path.is_mount() is unsupported on this system")
|
[] |
[] |
[
"USERNAME",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE",
"HOME"
] |
[]
|
["USERNAME", "HOMEPATH", "HOMEDRIVE", "USERPROFILE", "HOME"]
|
python
| 5 | 0 | |
installer/Installer.java
|
import org.json.*;
import java.awt.*;
import java.awt.TrayIcon.MessageType;
import java.awt.event.*;
import java.awt.image.BufferedImage;
import java.beans.*;
import java.io.*;
import java.lang.reflect.Field;
import java.net.URL;
import java.net.URI;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
import javax.imageio.ImageIO;
import javax.swing.*;
import javax.swing.border.LineBorder;
/**
* Derived from https://github.com/MinecraftForge/Installer/
* Copyright 2013 MinecraftForge developers, & Mark Browning, StellaArtois
*
* Licensed under GNU LGPL v2.1 or later.
*
* @author mabrowning
*
*/
public class Installer extends JPanel implements PropertyChangeListener
{
private static final long serialVersionUID = -562178983462626162L;
private String tempDir = System.getProperty("java.io.tmpdir");
private static final boolean ALLOW_FORGE_INSTALL = true;
private static final boolean DEFAULT_FORGE_INSTALL = false;
private static final boolean ALLOW_HYDRA_INSTALL = false;
private static final boolean ALLOW_KATVR_INSTALL = true;
private static final boolean ALLOW_KIOSK_INSTALL = true;
private static final boolean ALLOW_HRTF_INSTALL = false;
private static final boolean PROMPT_REMOVE_HRTF = true;
private static final boolean ALLOW_SHADERSMOD_INSTALL = false;
private static final boolean NEEDS_2010_REDIST = false;
private static final boolean NEEDS_2012_REDIST = false;
// Currently needed for Win boxes - C++ redists
public static String winredist2012_64url = "http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe";
public static String winredist2012_32url = "http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x86.exe";
public static String winredist2010_64url = "http://download.microsoft.com/download/A/8/0/A80747C3-41BD-45DF-B505-E9710D2744E0/vcredist_x64.exe";
public static String winredist2010_32url = "http://download.microsoft.com/download/C/6/D/C6D0FD4E-9E53-4897-9B91-836EBA2AACD3/vcredist_x86.exe";
/* DO NOT RENAME THESE STRING CONSTS - THEY ARE USED IN (AND THE VALUES UPDATED BY) THE AUTOMATED BUILD SCRIPTS */
private static final String MINECRAFT_VERSION = "1.15.2";
private static final String MC_VERSION = "1.15.2";
private static final String MC_MD5 = "1d87e7d75a99172f0cffc4f96cdc44da";
private static final String OF_LIB_PATH = "libraries/optifine/OptiFine/";
private static final String OF_FILE_NAME = "1.15.2_HD_U_G1_pre15";
private static final String OF_MD5 = "0127f841a34f112b20889ccf81063adf";
private static final String OF_VERSION_EXT = ".jar";
private static String FORGE_VERSION = "31.2.31";
/* END OF DO NOT RENAME */
private static final String DEFAULT_PROFILE_NAME = "Vivecraft " + MINECRAFT_VERSION;
private static final String DEFAULT_PROFILE_NAME_FORGE = "Vivecraft-Forge " + MINECRAFT_VERSION;
private static final String HOMEPAGE_LINK = "http://www.vivecraft.org";
private static final String DONATION_LINK = "https://www.patreon.com/jrbudda";
private static final String ORIG_FORGE_VERSION = FORGE_VERSION;
private InstallTask task;
private static ProgressMonitor monitor;
static private File targetDir;
private String[] forgeVersions = null;
private boolean forgeVersionInstalled = false;
private static String FULL_FORGE_VERSION = MINECRAFT_VERSION + "-" + FORGE_VERSION;
private String forge_url = "https://maven.minecraftforge.net/net/minecraftforge/forge/" + FULL_FORGE_VERSION + "/forge-" + FULL_FORGE_VERSION + "-installer.jar";
private File forgeInstaller;
private JTextField selectedDirText;
private JLabel infoLabel;
private JDialog dialog;
private JPanel fileEntryPanel;
private Frame emptyFrame;
private String jar_id;
private String version;
private String mod = "";
private JCheckBox useForge;
private JCheckBox useShadersMod;
private ButtonGroup bg = new ButtonGroup();
private JCheckBox createProfile;
private JComboBox forgeVersion;
private JCheckBox useHydra;
private JCheckBox useHrtf;
private JCheckBox katvr;
private JCheckBox kiosk;
private JCheckBox optCustomForgeVersion;
private JTextField txtCustomForgeVersion;
private JComboBox ramAllocation;
private final boolean QUIET_DEV = false;
private File releaseNotes = null;
private static String releaseNotePathAddition = "";
private static JLabel instructions;
private String smcVanillaURL = "http://www.karyonix.net/shadersmod/files/ShadersMod-v2.3.29mc1.7.10-installer.jar";
private String smcForgeURL = "http://www.karyonix.net/shadersmod/files/ShadersModCore-v2.3.31-mc1.7.10-f.jar";
private final String smcVanillaLib = "libraries/shadersmodcore/ShadersModCore/2.3.29mc1.7.10";
private final String smcForgelib = "libraries/shadersmodcore/ShadersModCore/2.3.31mc1.7.10-f";
private final String smcVanillaFile = "ShadersModCore-2.3.29mc1.7.10.jar";
private final String smcForgeFile = "ShadersModCore-2.3.31mc1.7.10-f.jar";
private final String smcVanillaMD5 = "4797D91A1F3752EF47242637901199CB";
private final String smcForgeMD5 = "F66374AEA8DDA5F3B7CCB20C230375D7";
private JTextField txtCustomProfileName;
private JTextField txtCustomGameDir;
private JCheckBox chkCustomProfileName;
private JCheckBox chkCustomGameDir;
static private final String forgeNotFound = "Forge not found..." ;
private String userHomeDir;
private String osType;
private boolean isWindows = false;
private String appDataDir;
boolean isMultiMC = false;
File mmcinst = null;
public Installer(File target)
{
targetDir = target;
ToolTipManager.sharedInstance().setDismissDelay(Integer.MAX_VALUE);
this.setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
JPanel logoSplash = new JPanel();
logoSplash.setLayout(new BoxLayout(logoSplash, BoxLayout.Y_AXIS));
try {
// Read png
BufferedImage image;
image = ImageIO.read(Installer.class.getResourceAsStream("logo.png"));
ImageIcon icon = new ImageIcon(image.getScaledInstance(500, 200, java.awt.Image.SCALE_SMOOTH));
JLabel logoLabel = new JLabel(icon);
logoLabel.setAlignmentX(LEFT_ALIGNMENT);
logoLabel.setAlignmentY(CENTER_ALIGNMENT);
if (!QUIET_DEV) // VIVE - hide oculus logo
logoSplash.add(logoLabel);
} catch (IOException e) {
        } catch (IllegalArgumentException e) {
}
userHomeDir = System.getProperty("user.home", ".");
osType = System.getProperty("os.name").toLowerCase();
if (osType.contains("win"))
{
isWindows = true;
appDataDir = System.getenv("APPDATA");
}
version = "UNKNOWN";
try {
InputStream ver = Installer.class.getResourceAsStream("version");
if( ver != null )
{
String[] tok = new BufferedReader(new InputStreamReader(ver)).readLine().split(":");
if( tok.length > 0)
{
jar_id = tok[0];
version = tok[1];
} else {
throw new Exception("token length is 0!");
}
} else {
throw new Exception("version stream is null!");
}
} catch (Exception e) {
JOptionPane.showMessageDialog(null,
e.getMessage(),"",JOptionPane.WARNING_MESSAGE);
}
// Read release notes, save to file
String tmpFileName = System.getProperty("java.io.tmpdir") + releaseNotePathAddition + "Vivecraft" + version.toLowerCase() + "_release_notes.txt";
releaseNotes = new File(tmpFileName);
InputStream is = Installer.class.getResourceAsStream("release_notes.txt");
if (!copyInputStreamToFile(is, releaseNotes)) {
releaseNotes = null;
}
JLabel tag = new JLabel("Welcome! This will install Vivecraft "+ version);
tag.setAlignmentX(LEFT_ALIGNMENT);
tag.setAlignmentY(CENTER_ALIGNMENT);
logoSplash.add(tag);
logoSplash.add(Box.createRigidArea(new Dimension(5,20)));
tag = new JLabel("Select path to minecraft. (Only change this if using MultiMC.)");
tag.setAlignmentX(LEFT_ALIGNMENT);
tag.setAlignmentY(CENTER_ALIGNMENT);
logoSplash.add(tag);
logoSplash.setAlignmentX(LEFT_ALIGNMENT);
logoSplash.setAlignmentY(TOP_ALIGNMENT);
this.add(logoSplash);
JPanel entryPanel = new JPanel();
entryPanel.setLayout(new BoxLayout(entryPanel,BoxLayout.X_AXIS));
entryPanel.setAlignmentX(LEFT_ALIGNMENT);
entryPanel.setAlignmentY(TOP_ALIGNMENT);
selectedDirText = new JTextField();
selectedDirText.setEditable(false);
selectedDirText.setToolTipText("Path to minecraft");
selectedDirText.setAlignmentX(LEFT_ALIGNMENT);
selectedDirText.setAlignmentY(TOP_ALIGNMENT);
selectedDirText.setMaximumSize(new Dimension(400,20));
JButton dirSelect = new JButton();
dirSelect.setMaximumSize(new Dimension(20,20));
dirSelect.setAction(new FileSelectAction());
dirSelect.setText("...");
dirSelect.setToolTipText("Select an alternative minecraft directory");
dirSelect.setAlignmentX(LEFT_ALIGNMENT);
dirSelect.setAlignmentY(TOP_ALIGNMENT);
entryPanel.add(selectedDirText);
entryPanel.add(dirSelect);
infoLabel = new JLabel();
infoLabel.setHorizontalTextPosition(JLabel.LEFT);
infoLabel.setVerticalTextPosition(JLabel.TOP);
infoLabel.setAlignmentX(LEFT_ALIGNMENT);
infoLabel.setAlignmentY(TOP_ALIGNMENT);
infoLabel.setVisible(false);
fileEntryPanel = new JPanel();
fileEntryPanel.setLayout(new BoxLayout(fileEntryPanel,BoxLayout.Y_AXIS));
fileEntryPanel.setAlignmentX(LEFT_ALIGNMENT);
fileEntryPanel.setAlignmentY(TOP_ALIGNMENT);
fileEntryPanel.add(entryPanel);
fileEntryPanel.add(infoLabel);
this.add(fileEntryPanel);
this.add(Box.createVerticalStrut(5));
//Forge Options
JPanel forgePanel = new JPanel();
forgePanel.setLayout( new BoxLayout(forgePanel, BoxLayout.X_AXIS));
//Create forge: no/yes buttons
useForge = new JCheckBox();
AbstractAction actf = new updateActionF();
actf.putValue(AbstractAction.NAME, "Install Vivecraft with Forge");
useForge.setAction(actf);
useForge.setSelected(DEFAULT_FORGE_INSTALL);
forgeVersion = new JComboBox();
if (!ALLOW_FORGE_INSTALL)
useForge.setEnabled(false);
useForge.setToolTipText(
"<html>" +
"If checked, installs Vivecraft with Forge support.<br>" +
"</html>");
//Add "yes" and "which version" to the forgePanel
useForge.setAlignmentX(LEFT_ALIGNMENT);
forgeVersion.setAlignmentX(LEFT_ALIGNMENT);
forgePanel.setAlignmentX(LEFT_ALIGNMENT);
forgePanel.add(useForge);
optCustomForgeVersion = new JCheckBox();
AbstractAction actf2 = new updateActionF();
actf2.putValue(AbstractAction.NAME, "Custom Version");
optCustomForgeVersion.setAction(actf2);
txtCustomForgeVersion = new JTextField(FORGE_VERSION);
txtCustomForgeVersion.setMaximumSize(new Dimension(100,20));
forgePanel.add(optCustomForgeVersion);
forgePanel.add(txtCustomForgeVersion);
//forgePanel.add(forgeVersion);
//Create Profile
createProfile = new JCheckBox("", true);
AbstractAction actp = new updateActionP();
actp.putValue(AbstractAction.NAME, "Create Vivecraft launcher profile");
createProfile.setAction(actp);
createProfile.setAlignmentX(LEFT_ALIGNMENT);
createProfile.setSelected(true);
createProfile.setToolTipText(
"<html>" +
"Creates or updates a Minecraft Launcher profile for Vivecraft with the selected settings.<br>" +
"You should typically leave this checked." +
"</html>");
//Binaural Audio
useHrtf = new JCheckBox("Enable binaural audio (Only needed once per PC)", false);
useHrtf.setToolTipText(
"<html>" +
"If checked, the installer will create the configuration file needed for OpenAL HRTF<br>" +
"ear-aware sound in Minecraft (and other games).<br>" +
" If the file has previously been created, you do not need to check this again.<br>" +
" NOTE: Your sound card's output MUST be set to 44.1Khz.<br>" +
" WARNING, will overwrite " + (isWindows ? (appDataDir + "\\alsoft.ini") : (userHomeDir + "/.alsoftrc")) + "!<br>" +
" Delete the " + (isWindows ? "alsoft.ini" : "alsoftrc") + " file to disable HRTF again." +
"</html>");
useHrtf.setAlignmentX(LEFT_ALIGNMENT);
//ShadersMod
useShadersMod = new JCheckBox();
useShadersMod.setAlignmentX(LEFT_ALIGNMENT);
AbstractAction acts = new updateActionSM();
acts.putValue(AbstractAction.NAME, "Install Vivecraft with ShadersMod 2.3.29");
useShadersMod.setAction(acts);
useShadersMod.setToolTipText(
"<html>" +
"If checked, sets the vivecraft profile to use ShadersMod <br>" +
"support." +
"</html>");
//RAM Allocation
JPanel ramPanel = new JPanel();
ramPanel.setLayout( new BoxLayout(ramPanel, BoxLayout.X_AXIS));
ramPanel.setAlignmentX(LEFT_ALIGNMENT);
ramPanel.setAlignmentY(TOP_ALIGNMENT);
Integer[] rams = {1,2,4,6,8};
ramAllocation = new JComboBox(rams);
ramAllocation.setSelectedIndex(1);
ramAllocation.setToolTipText(
"<html>" +
"Select the amount of Ram, in GB to allocate to the Vivecraft profile.<br>" +
"2GB is recommended. More than 1GB of ram requires 64 bit PC and java." +
"</html>");
ramAllocation.setAlignmentX(LEFT_ALIGNMENT);
ramAllocation.setMaximumSize( new Dimension((int)ramAllocation.getPreferredSize().getWidth(), 20));
AbstractAction actram = new updateActionRam();
actram.putValue(AbstractAction.NAME, "Profile Ram Allocation (GB)");
ramAllocation.setAction(actram);
JLabel ram = new JLabel(" Profile Ram Allocation (GB) ");
ram.setAlignmentX(LEFT_ALIGNMENT);
ramPanel.add(ram);
ramPanel.add(ramAllocation);
//Custom Profile
JPanel namePanel = new JPanel();
namePanel.setLayout( new BoxLayout(namePanel, BoxLayout.X_AXIS));
namePanel.setAlignmentX(LEFT_ALIGNMENT);
namePanel.setAlignmentY(TOP_ALIGNMENT);
txtCustomProfileName = new JTextField();
txtCustomProfileName.setAlignmentX(LEFT_ALIGNMENT);
txtCustomProfileName.setMaximumSize(new Dimension(250,20));
txtCustomProfileName.setEditable(false);
chkCustomProfileName = new JCheckBox();
chkCustomProfileName.setAlignmentX(LEFT_ALIGNMENT);
AbstractAction u = new updateTxtEnabled();
u.putValue(AbstractAction.NAME, "Custom Profile Name");
chkCustomProfileName.setAction(u);
chkCustomProfileName.setToolTipText(
"<html>" +
"Enter a custom name for this profile</html>");
namePanel.add(Box.createRigidArea(new Dimension(36,20)));
namePanel.add(chkCustomProfileName);
namePanel.add(txtCustomProfileName);
// Custom Game Dir
JPanel gameDirPanel = new JPanel();
gameDirPanel.setLayout( new BoxLayout(gameDirPanel, BoxLayout.X_AXIS));
gameDirPanel.setAlignmentX(LEFT_ALIGNMENT);
gameDirPanel.setAlignmentY(TOP_ALIGNMENT);
txtCustomGameDir= new JTextField();
txtCustomGameDir.setAlignmentX(LEFT_ALIGNMENT);
txtCustomGameDir.setMaximumSize(new Dimension(400,20));
txtCustomGameDir.setEditable(false);
chkCustomGameDir = new JCheckBox("Modpack Directory");
chkCustomGameDir.setAlignmentX(LEFT_ALIGNMENT);
chkCustomGameDir.setToolTipText(
"<html>" +
"Points the profile at a different game directory.<br>" +
"Select this to use Vivecraft with a modpack.<br>" +
"The game directory should contain the 'mods' " +
"directory of the desired pack." +
"</html>");
JButton gdirSelect = new JButton();
gdirSelect.setAction(new GameDirSelectAction());
gdirSelect.setText("...");
gdirSelect.setMaximumSize(new Dimension(20,20));
gdirSelect.setToolTipText("Select a modpack directory");
entryPanel.add(gdirSelect);
gameDirPanel.add(Box.createRigidArea(new Dimension(36,20)));
gameDirPanel.add(chkCustomGameDir);
gameDirPanel.add(txtCustomGameDir);
gameDirPanel.add(gdirSelect);
// KATVR
katvr = new JCheckBox("KATVR Treadmill Driver", false);
katvr.setToolTipText(
"<html>" +
"If checked, install the drivers needed for KATVR Treadmill<br>" +
"DO NOT select this unless you have the KATVR runtime installed.</html>");
katvr.setAlignmentX(LEFT_ALIGNMENT);
katvr.setEnabled(isWindows);
kiosk = new JCheckBox("Kiosk Mode", false);
kiosk.setToolTipText(
"<html>" +
"If checked, disables use of in-game menu via controller" +
"</html>");
kiosk.setAlignmentX(LEFT_ALIGNMENT);
this.add(forgePanel);
if(ALLOW_SHADERSMOD_INSTALL) this.add(useShadersMod);
this.add(createProfile);
this.add(ramPanel);
this.add(namePanel);
this.add(gameDirPanel);
if(ALLOW_HRTF_INSTALL)this.add(useHrtf);
this.add(new JLabel(" "));
if(ALLOW_KATVR_INSTALL||ALLOW_KIOSK_INSTALL) this.add(new JLabel("Advanced Options"));
if(ALLOW_KIOSK_INSTALL) this.add(kiosk);
if(ALLOW_KATVR_INSTALL) this.add(katvr);
this.add(Box.createRigidArea(new Dimension(5,20)));
instructions = new JLabel("",SwingConstants.CENTER);
instructions.setAlignmentX(CENTER_ALIGNMENT);
instructions.setAlignmentY(TOP_ALIGNMENT);
instructions.setForeground(Color.RED);
instructions.setPreferredSize(new Dimension(20, 40));
this.add(instructions);
this.add(Box.createVerticalGlue());
JLabel wiki = linkify("Vivecraft home page",HOMEPAGE_LINK,"Vivecraft Home");
JLabel donate = linkify("If you think Vivecraft is awesome, please consider supporting us on Patreon",DONATION_LINK,"jrbudda's Patreon");
JLabel optifine = linkify("Vivecraft includes Optifine. Consider supporting it as well.","http://optifine.net/donate.php","http://optifine.net/donate.php");
wiki.setAlignmentX(CENTER_ALIGNMENT);
wiki.setHorizontalAlignment(SwingConstants.CENTER);
donate.setAlignmentX(CENTER_ALIGNMENT);
donate.setHorizontalAlignment(SwingConstants.CENTER);
optifine.setAlignmentX(CENTER_ALIGNMENT);
optifine.setHorizontalAlignment(SwingConstants.CENTER);
this.add(Box.createRigidArea(new Dimension(5,20)));
this.add( wiki );
this.add( donate );
this.add( optifine );
updateFilePath();
updateInstructions();
}
public void run()
{
JOptionPane optionPane = new JOptionPane(this, JOptionPane.PLAIN_MESSAGE, JOptionPane.OK_CANCEL_OPTION, null, new String[]{"Install", "Cancel"});
emptyFrame = new Frame("Vivecraft Installer");
emptyFrame.setUndecorated(true);
emptyFrame.setVisible(true);
emptyFrame.setLocationRelativeTo(null);
dialog = optionPane.createDialog(emptyFrame, "Vivecraft Installer");
dialog.setResizable(true);
dialog.setSize(620,748);
dialog.setDefaultCloseOperation(JDialog.DISPOSE_ON_CLOSE);
dialog.setVisible(true);
String str = ((String)optionPane.getValue());
if (str !=null && ((String)optionPane.getValue()).equalsIgnoreCase("Install"))
{
String check = System.getenv("_JAVA_OPTIONS");
if (check != null && check.toLowerCase().contains("xmx")){
JOptionPane.showOptionDialog(
null,
"The installer has detected a java override environment variable on your system\n"+
"This will limit the maximum amount of memory available to java and may cause Minecraft to crash or run poorly.\n"+
"You should remove this variable before launching the game.\n\n"+
"Found _JAVA_OPTIONS " + check,
"Warning!",
JOptionPane.DEFAULT_OPTION,
JOptionPane.ERROR_MESSAGE, null, null, null);
}
//check for multimc
for(File f : targetDir.listFiles()){
if(f.getName().equalsIgnoreCase("multimc.exe") || (f.getName().equalsIgnoreCase("multimc") && f.isFile()) || f.getName().equalsIgnoreCase("multimc.cfg")){
ArrayList<File> ilist = new ArrayList<File>();
File insts = new File(targetDir, "instances");
for(File inst : insts.listFiles()){
if(inst.isDirectory() && !inst.getName().startsWith("_"))
ilist.add(inst);
}
JComboBox icb = new JComboBox(ilist.toArray());
File sel =(File) JOptionPane.showInputDialog(null,"Select MultiMC Instance.","MultiMC Detected", JOptionPane.PLAIN_MESSAGE, null, ilist.toArray(), null);
if(sel != null){
mmcinst = sel;
isMultiMC = true;
} else {
dialog.dispose();
emptyFrame.dispose();
}
break; // don't ask multiple times
}
}
//
int option = 0;
String msg = "Please ensure you have closed the Minecraft Launcher before proceeding.";
if(isMultiMC)
msg = "Please ensure you have closed MultiMC before proceeding.";
if(createProfile.isSelected() || isMultiMC)
option = JOptionPane.showOptionDialog(
null,
msg,
"Important!",
JOptionPane.OK_CANCEL_OPTION,
JOptionPane.WARNING_MESSAGE, null, null, null);
if (option == JOptionPane.OK_OPTION) {
monitor = new ProgressMonitor(null, "Installing Vivecraft...", "", 0, 100);
monitor.setMillisToDecideToPopup(0);
monitor.setMillisToPopup(0);
task = new InstallTask();
task.addPropertyChangeListener(this);
task.execute();
}
else{
dialog.dispose();
emptyFrame.dispose();
}
}
else{
dialog.dispose();
emptyFrame.dispose();
}
}
class InstallTask extends SwingWorker<Void, Void>{
/*
* Main task. Executed in background thread.
*/
public String finalMessage;
@Override
public Void doInBackground()
{
StringBuilder sbErrors = new StringBuilder();
String minecriftVersionName = "vivecraft-" + version + mod;
boolean checkedRedists = false;
boolean redistSuccess = true;
boolean downloadedForge = false;
boolean installedForge = false;
if (useForge.isSelected())
mod = "-forge";
monitor.setProgress(0);
try {
// Set progress dialog size (using reflection - hacky)
Field progressdialog = monitor.getClass().getDeclaredField("dialog");
if (progressdialog != null) {
progressdialog.setAccessible(true);
Dialog dlg = (Dialog) progressdialog.get(monitor);
if (dlg != null) {
dlg.setSize(550, 200);
dlg.setLocationRelativeTo(null);
}
}
}
catch (Exception e) {}
finalMessage = "Failed: Couldn't download C++ redistributables. ";
monitor.setNote("Checking for required libraries...");
monitor.setProgress(5);
boolean downloadedOptifine = false;
if(OF_FILE_NAME != ""){
finalMessage = "Failed: Couldn't download Optifine. ";
monitor.setNote("Checking Optifine... Please donate to them!");
monitor.setProgress(42);
// Attempt optifine download...
monitor.setNote("Downloading Optifine... Please donate to them!");
for (int i = 1; i <= 3; i++)
{
if (monitor.isCanceled()) return null;
if (DownloadOptiFine())
{
// Got it!
downloadedOptifine = true;
break;
}
// Failed. Sleep a bit and retry...
if (i < 3) {
try {
Thread.sleep(i * 1000);
}
catch (InterruptedException e) {
}
monitor.setNote("Downloading Optifine...retrying...");
}
}
} else {
downloadedOptifine = true;
}
if(useShadersMod.isSelected()){
finalMessage = "Failed: Couldn't download ShadersMod. ";
monitor.setNote("Checking ShadersModCore");
monitor.setProgress(42);
boolean downloadedSMC = false;
monitor.setNote("Downloading ShadersModCore");
for (int i = 1; i <= 3; i++)
{
if (downloadSMC(useForge.isSelected()))
{
// Got it!
downloadedSMC = true;
break;
}
// Failed. Sleep a bit and retry...
if (i < 3) {
monitor.setNote("Downloading ShadersModCore... waiting...");
try {
Thread.sleep(i * 1000);
}
catch (InterruptedException e) {
}
monitor.setNote("Downloading ShadersModCore...retrying...");
}
}
}
monitor.setProgress(50);
// VIVE START - install openVR
monitor.setProgress(52);
monitor.setNote("Installing OpenVR...");
finalMessage = "Failed: Couldn't extract openvr_api.dll to .minecraft folder.";
if(!InstallOpenVR())
{
monitor.close();
return null;
}
// VIVE END - install openVR
// Setup forge if necessary
if(useForge.isSelected()){
if(optCustomForgeVersion.isSelected())
FORGE_VERSION = txtCustomForgeVersion.getText();
FULL_FORGE_VERSION = MINECRAFT_VERSION + "-" + FORGE_VERSION;
forgeInstaller = new File(tempDir + "/forge-" + FULL_FORGE_VERSION + "-installer.jar");
forge_url = "https://maven.minecraftforge.net/net/minecraftforge/forge/" + FULL_FORGE_VERSION + "/forge-" + FULL_FORGE_VERSION + "-installer.jar";
if( targetDir.exists() ) {
File ForgeDir = new File( targetDir, "libraries"+File.separator+"net"+File.separator+"minecraftforge"+File.separator+"forge");
if( ForgeDir.isDirectory() ) {
forgeVersions = ForgeDir.list();
if (forgeVersions != null && forgeVersions.length > 0) {
// Check for the currently required forge
for (String forgeVersion : forgeVersions) {
if (forgeVersion.contains(FORGE_VERSION)) {
File forgeVersionDir = new File(ForgeDir, forgeVersion);
if (forgeVersionDir.isDirectory()) {
for (File forgeVersionFile : forgeVersionDir.listFiles()) {
if (forgeVersionFile.length() > 512000) { // check for some realistically sized files because Mojang's launcher does stupid nonsense
forgeVersionInstalled = true;
break;
}
}
}
break;
}
}
}
}
}
if (useForge.isSelected() && !forgeVersionInstalled && !isMultiMC) {
monitor.setProgress(55);
monitor.setNote("Downloading Forge " + FULL_FORGE_VERSION + "...");
downloadedForge = downloadFile(forge_url, forgeInstaller);
if(!downloadedForge)
JOptionPane.showMessageDialog(null, "Could not download Forge. Please exit this installer and download it manually", "Forge Installation", JOptionPane.WARNING_MESSAGE);
}
if (downloadedForge && !forgeVersionInstalled) {
monitor.setProgress(65);
monitor.setNote("Installing Forge " + FULL_FORGE_VERSION + "...");
installedForge = installForge(forgeInstaller);
}
}
monitor.setProgress(75);
monitor.setNote("Extracting correct Minecrift version...");
finalMessage = "Failed: Couldn't extract Minecrift. Try redownloading this installer.";
if(!ExtractVersion())
{
monitor.close();
return null;
}
finalMessage = "Failed to setup HRTF.";
if(useHrtf.isSelected())
{
monitor.setProgress(85);
monitor.setNote("Configuring HRTF audio...");
if(!EnableHRTF())
{
sbErrors.append("Failed to set up HRTF! Vivecraft will still work but audio won't be binaural.\n");
}
}
if(PROMPT_REMOVE_HRTF)
DeleteLegacyHRTF();
boolean profileCreated = false;
finalMessage = "Failed: Couldn't setup profile!";
String profileName = getMinecraftProfileName(useForge.isSelected(), useShadersMod.isSelected());
if (chkCustomProfileName.isSelected() && !txtCustomProfileName.getText().trim().isEmpty()) {
profileName = txtCustomProfileName.getText();
}
if(!isMultiMC){
if (createProfile.isSelected())
{
monitor.setProgress(95);
monitor.setNote("Creating Vivecraft profile...");
if (!updateLauncherJson(targetDir, minecriftVersionName, profileName))
sbErrors.append("Failed to set up 'Vivecraft' profile (you can still manually select Edit Profile->Use Version " + minecriftVersionName + " in the Minecraft launcher)\n");
else
profileCreated = true;
}
} else {
if (!updateMMCInst(mmcinst, minecriftVersionName))
sbErrors.append("Failed to set up 'Vivecraft' into instance.");
else
profileCreated = true;
}
if (!downloadedOptifine) {
finalMessage = "Installed (but failed to download OptiFine). Restart Minecraft" +
(profileCreated == false ? " and Edit Profile->Use Version " + minecriftVersionName : " and select the '" + getMinecraftProfileName(useForge.isSelected(), useShadersMod.isSelected()) + "' profile.") +
"\nPlease download OptiFine " + OF_FILE_NAME + " from https://optifine.net/downloads before attempting to play." +
"\nDo not run and install it, instead rename the file to OptiFine-" + OF_FILE_NAME + " (note the hyphen) and manually place it into the following directory:" +
"\n" + (isMultiMC ? new File(mmcinst, "libraries").getAbsolutePath() : new File(targetDir, OF_LIB_PATH + OF_FILE_NAME).getAbsolutePath());
}
else {
if (isMultiMC && mmcinst != null) {
if (profileCreated) finalMessage = "Installed successfully! MultiMC Instance: " + mmcinst.toString();
else finalMessage = "Installed but failed to update instance, launch may fail. See vivecraft.org for manual configuration.";
} else {
finalMessage = "Installed successfully! Restart Minecraft" +
(!profileCreated ? " and Edit Profile->Use Version " + minecriftVersionName : " and select the '" + profileName + "' profile.");
}
}
monitor.setProgress(100);
monitor.close();
return null;
}
/*
* Executed in event dispatching thread
*/
@Override
public void done() {
setCursor(null); // turn off the wait cursor
JOptionPane.showMessageDialog(null, finalMessage, "Complete", JOptionPane.INFORMATION_MESSAGE);
dialog.dispose();
emptyFrame.dispose();
}
private boolean DownloadOptiFine()
{
boolean success = true;
boolean deleted = false;
try {
File fod = new File(targetDir,OF_LIB_PATH+OF_FILE_NAME+"_LIB");
if(isMultiMC)
fod = new File(mmcinst,"libraries");
fod.mkdirs();
File fo = new File(fod,"OptiFine-"+OF_FILE_NAME+"_LIB.jar");
// Attempt to get the Optifine MD5
String optOnDiskMd5 = GetMd5(fo);
System.out.println(optOnDiskMd5 == null ? fo.getCanonicalPath() : fo.getCanonicalPath() + " MD5: " + optOnDiskMd5);
// Test MD5
if (optOnDiskMd5 == null)
{
// Just continue...
monitor.setNote("Optifine not found - downloading");
}
else if (!optOnDiskMd5.equalsIgnoreCase(OF_MD5)) {
// Bad copy. Attempt delete just to make sure.
monitor.setNote("Optifine MD5 bad - downloading");
try {
deleted = fo.delete();
}
catch (Exception ex1) {
JOptionPane.showMessageDialog(null, "Could not delete existing Optifine jar " +ex1.getLocalizedMessage(), "Optifine Installation", JOptionPane.WARNING_MESSAGE);
ex1.printStackTrace();
}
}
else {
// A good copy!
monitor.setNote("Optifine MD5 good! " + OF_MD5);
return true;
}
// Need to attempt download...
success = downloadFile("http://vivecraft.org/jar/Optifine/OptiFine_" + OF_FILE_NAME + "_LIB" + OF_VERSION_EXT, fo);
// Check (potentially) downloaded optifine md5
optOnDiskMd5 = GetMd5(fo);
if (success == false || optOnDiskMd5 == null || !optOnDiskMd5.equalsIgnoreCase(OF_MD5)) {
// No good
if (optOnDiskMd5 != null)
monitor.setNote("Optifine - bad MD5. Got " + optOnDiskMd5 + ", expected " + OF_MD5);
try {
deleted = fo.delete();
}
catch (Exception ex1) {
JOptionPane.showMessageDialog(null, "Could not delete existing Optifine jar " +ex1.getLocalizedMessage(), "Download File", JOptionPane.WARNING_MESSAGE);
ex1.printStackTrace();
}
return false;
}
return true;
} catch (Exception e) {
finalMessage += " Error: "+e.getLocalizedMessage();
}
return false;
}
private boolean downloadSMC(boolean forge)
{
String dir = null;
String file = null;
String url = null;
String goodmd5 = null;
String temp = "temp.jar";
if (forge) {
dir = smcForgelib;
file = smcForgeFile;
url = smcForgeURL;
goodmd5 = smcForgeMD5;
} else {
dir = smcVanillaLib;
file = smcVanillaFile;
url = smcVanillaURL;
goodmd5 = smcVanillaMD5;
}
boolean success = true;
boolean deleted = false;
try {
File fod = new File(targetDir,dir);
fod.mkdirs();
File fo = new File(fod,file);
// Attempt to get the Optifine MD5
String md5 = GetMd5(fo);
System.out.println(md5 == null ? fo.getCanonicalPath() : fo.getCanonicalPath() + " MD5: " + md5);
// Test MD5
if (md5 == null)
{
// Just continue...
System.out.println("ShadersMod not found - downloading");
}
else if (!md5.equalsIgnoreCase(goodmd5)) {
// Bad copy. Attempt delete just to make sure.
System.out.println("ShadersMod MD5 bad - downloading");
try {
deleted = fo.delete();
}
catch (Exception ex1) {
ex1.printStackTrace();
}
}
else {
// A good copy!
System.out.println("ShadersMod MD5 good! " + md5);
return true;
}
// Need to attempt download...
if(forge) {
success = downloadFile(url, fo);
}else {
File t = new File(fod,temp);
if( downloadFile(url, t)){
ZipInputStream temp_jar = new ZipInputStream(new FileInputStream(t));
ZipEntry ze = null;
byte data[] = new byte[1024];
while ((ze = temp_jar.getNextEntry()) != null) {
if(ze.getName().equals(file)) //extract the core jar.
{
FileOutputStream output = new FileOutputStream(fo);
try
{
byte[] buffer = new byte[2048];
int len = 0;
while ((len = temp_jar.read(buffer)) > 0)
{
output.write(buffer, 0, len);
}
}
finally
{
if(output!=null) output.close();
}
}
}
temp_jar.close();
t.delete();
return true;
} else {
return false;
}
}
//Check (potentially) downloaded shadersmodcore md5
md5 = GetMd5(fo);
if (success == false || md5 == null || !md5.equalsIgnoreCase(goodmd5)) {
// No good
if (md5 != null)
System.out.println("ShadersMod - bad MD5. Got " + md5 + ", expected " + goodmd5);
try {
deleted = fo.delete();
}
catch (Exception ex1) {
ex1.printStackTrace();
}
return false;
}
return true;
} catch (Exception e) {
finalMessage += " Error: "+e.getLocalizedMessage();
}
return false;
}
private boolean downloadFile(String surl, File fo)
{
return downloadFile(surl, fo, null);
}
private boolean downloadFile(String surl, File fo, String md5)
{
boolean success = true;
FileOutputStream fos = null;
try {
fos = new FileOutputStream(fo);
System.out.println(surl);
URL url = new URL(surl);
ReadableByteChannel rbc = Channels.newChannel(url.openStream());
long bytes = fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.flush();
}
catch(Exception ex) {
JOptionPane.showMessageDialog(null, "Could not download from " + surl + " to " + fo.getName() + " \r\n " + ex.getLocalizedMessage(), "Error downloading", JOptionPane.ERROR_MESSAGE);
ex.printStackTrace();
success = false;
}
finally {
if (fos != null) {
try {
fos.close();
} catch (Exception e) { }
}
}
if (success) {
if (!checkMD5(fo, md5)){
JOptionPane.showMessageDialog(null, "Bad md5 for " + fo.getName() + "!" + " actual: " + GetMd5(fo), "Error downloading", JOptionPane.ERROR_MESSAGE);
fo.delete();
success = false;
}
} else {
JOptionPane.showMessageDialog(null, "Could not install " + surl, "Download File", JOptionPane.INFORMATION_MESSAGE);
}
return success;
}
private boolean checkMD5(File a, String b){
if (!a.exists()) return false;
if (b == null) return true;
return b.equalsIgnoreCase(GetMd5(a)); // avoids an NPE if the hash could not be computed
}
private String GetMd5(File fo)
{
if (!fo.exists())
return null;
if (fo.length() < 1)
return null;
FileInputStream fis = null;
try {
MessageDigest md = MessageDigest.getInstance("MD5");
fis = new FileInputStream(fo);
byte[] buffer = new byte[(int)fo.length()];
int numOfBytesRead = 0;
while( (numOfBytesRead = fis.read(buffer)) > 0)
{
md.update(buffer, 0, numOfBytesRead);
}
byte[] hash = md.digest();
StringBuilder sb = new StringBuilder();
for (byte b : hash) {
sb.append(String.format("%02X", b));
}
return sb.toString();
}
catch (Exception ex)
{
return null;
}
finally {
if (fis != null)
{
try {
fis.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
// Shamelessly ripped from Forge ClientInstall
private boolean installForge(File target)
{
try {
JOptionPane.showMessageDialog(null, "The Forge installer will launch. In it, please ensure \"Install client\" is selected and the correct directory is specified (default unless you changed it).", "Forge Installation", JOptionPane.INFORMATION_MESSAGE);
final Process proc = new ProcessBuilder(isWindows ? "javaw" : "java", "-jar", target.getAbsolutePath()).start();
new Thread("Forge Installer Stdout") { // needed otherwise subprocess blocks
@Override
public void run() {
try {
BufferedReader br = new BufferedReader(new InputStreamReader(proc.getInputStream()));
String line;
while ((line = br.readLine()) != null) {
System.out.println(line);
}
} catch (Exception e) {
e.printStackTrace();
}
}
}.start();
new Thread("Forge Installer Stderr") { // same
@Override
public void run() {
try {
BufferedReader br = new BufferedReader(new InputStreamReader(proc.getErrorStream()));
String line;
while ((line = br.readLine()) != null) {
System.err.println(line);
}
} catch (Exception e) {
e.printStackTrace();
}
}
}.start();
proc.waitFor();
} catch (Exception ex) {
ex.printStackTrace();
JOptionPane.showMessageDialog(null, "Error occurred launching Forge installer: " + ex.getClass().getName() + ": " + ex.getMessage() + "\nYou will need to install Forge " + FULL_FORGE_VERSION + " manually.", "Error", JOptionPane.ERROR_MESSAGE);
return false;
}
return true;
}
private boolean ExtractVersion() {
if( jar_id != null )
{
InputStream version_json;
if(isMultiMC) {
String filename = "version-multimc.json";
if (useForge.isSelected())
filename = "version-multimc-forge.json";
version_json = Installer.class.getResourceAsStream(filename);
}
else if(useForge.isSelected() /*&& forgeVersion.getSelectedItem() != forgeNotFound*/ )
{
String filename;
if(!useShadersMod.isSelected()){
filename = "version-forge.json";
mod="-forge";
}
else{
filename = "version-forge-shadersmod.json";
mod="-forge-shadersmod";
}
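// Wrap the bundled JSON stream so that, when a custom Forge version is selected, it is substituted for the default as the file is read.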
version_json = new FilterInputStream( Installer.class.getResourceAsStream(filename) ) {
public int read(byte[] buff) throws IOException {
int ret = in.read(buff);
if( ret > 0 ) {
String s = new String( buff,0, ret, "UTF-8");
if(optCustomForgeVersion.isSelected())
s = s.replace(ORIG_FORGE_VERSION, FORGE_VERSION);
ret = s.length();
System.arraycopy(s.getBytes("UTF-8"), 0, buff, 0, ret);
}
return ret;
}
};
} else {
String filename;
if( useShadersMod.isSelected() ) {
filename = "version-shadersmod.json";
mod="-shadersmod";
} else {
filename = "version.json";
}
version_json = Installer.class.getResourceAsStream(filename);
}
jar_id += mod;
InputStream version_jar = Installer.class.getResourceAsStream("version.jar");
if( version_jar != null && version_json != null )
try {
File ver_dir = null;
if(isMultiMC){
ver_dir = new File(mmcinst,"patches");
jar_id = "vivecraft";
}
else
ver_dir = new File(new File(targetDir,"versions"),jar_id);
ver_dir.mkdirs();
File ver_json_file = new File (ver_dir, jar_id+".json");
FileOutputStream ver_json = new FileOutputStream(ver_json_file);
int d;
byte data[] = new byte[40960];
// Extract json
while ((d = version_json.read(data)) != -1) {
ver_json.write(data,0,d);
}
ver_json.close();
//modify json args if needed
try {
int jsonIndentSpaces = 2;
File fileJson = ver_json_file;
String json = readAsciiFile(fileJson);
json = json.replace("$FILE",jar_id);
JSONObject root = new JSONObject(json);
String args = (String)root.opt("minecraftArguments");
if(args!=null) {
if(katvr.isSelected()) args += " --katvr";
if(kiosk.isSelected()) args += " --kiosk";
root.put("minecraftArguments", args);
}
if(isMultiMC)
root.remove("id");
/*if(isMultiMC && useForge.isSelected()) {
JSONArray tw = (JSONArray) root.get("+tweakers");
tw = new JSONArray();
tw.put("org.vivecraft.tweaker.MinecriftForgeTweaker");
tw.put("net.minecraftforge.fml.common.launcher.FMLTweaker");
tw.put("optifine.OptiFineForgeTweaker");
root.put("+tweakers", tw);
}*/
FileWriter fwJson = new FileWriter(fileJson);
fwJson.write(root.toString(jsonIndentSpaces));
fwJson.flush();
fwJson.close();
}
catch (Exception e) {
finalMessage += " Error: " + e.getMessage();
}
// Extract new lib
File lib_dir = new File(targetDir,"libraries/com/mtbs3d/minecrift/"+version);
if(isMultiMC)
lib_dir = new File(mmcinst,"libraries");
lib_dir.mkdirs();
File ver_file = new File (lib_dir, "minecrift-"+version+".jar");
FileOutputStream ver_jar = new FileOutputStream(ver_file);
while ((d = version_jar.read(data)) != -1) {
ver_jar.write(data,0,d);
}
ver_jar.close();
return ver_json_file.exists() && ver_file.exists();
} catch (Exception e) {
finalMessage += " Error: " + e.getMessage();
}
}
return false;
}
private boolean DeleteLegacyHRTF() {
// Find the correct location
File alsoftrc;
//I honestly have no clue where Mac stores this, so I'm assuming the same as Linux.
if (isWindows && appDataDir != null)
{
alsoftrc = new File(appDataDir, "alsoft.ini");
}
else
{
alsoftrc = new File(userHomeDir, ".alsoftrc");
}
try
{
//check if exists and prompt
if(alsoftrc.exists()) {
int ret = JOptionPane.showConfirmDialog(null,
"Binaural Audio .ini file found. Vivecraft now handles this setting in-game.\r\nWould you like to delete this file?\r\n\r\nChoose 'No' only if you play older versions of Vivecraft or have some other need for a system-wide alsoft.ini",
"Remove legacy file",
JOptionPane.YES_NO_OPTION,
JOptionPane.QUESTION_MESSAGE);
if(ret == JOptionPane.YES_OPTION) {
alsoftrc.delete();
}
}
}
catch (Exception e)
{
finalMessage += " Error: "+e.getLocalizedMessage();
}
return false;
}
private boolean EnableHRTF() // Implementation by Zach Jaggi
{
// Find the correct location to stick alsoftrc
File alsoftrc;
//I honestly have no clue where Mac stores this, so I'm assuming the same as Linux.
if (isWindows && appDataDir != null)
{
alsoftrc = new File(appDataDir, "alsoft.ini");
}
else
{
alsoftrc = new File(userHomeDir, ".alsoftrc");
}
try
{
//Overwrite the current file.
alsoftrc.createNewFile();
PrintWriter writer = new PrintWriter(alsoftrc);
writer.write("hrtf = true\n");
writer.write("frequency = 44100\n");
writer.close();
return true;
}
catch (Exception e)
{
finalMessage += " Error: "+e.getLocalizedMessage();
}
return false;
}
// VIVE START - install openVR dlls
private boolean InstallOpenVR() {
//nope.
return true;
}
private boolean installFile(String osFolder, String resource){
File win32_dir = new File (targetDir, osFolder);
win32_dir.mkdirs();
InputStream openvrdll = Installer.class.getResourceAsStream(resource);
File dll_out = new File (targetDir, resource);
if (!copyInputStreamToFile(openvrdll, dll_out)){
return false;
}
return true;
}
// VIVE END - install openVR dll
private void sleep(int millis)
{
try {
Thread.sleep(millis);
} catch (InterruptedException e) {}
}
private boolean updateLauncherJson(File mcBaseDirFile, String minecriftVer, String profileName)
{
boolean result = false;
try {
int jsonIndentSpaces = 2;
File fileJson = new File(mcBaseDirFile, "launcher_profiles.json");
String json = readAsciiFile(fileJson);
JSONObject root = new JSONObject(json);
//System.out.println(root.toString(jsonIndentSpaces));
JSONObject profiles = (JSONObject)root.get("profiles");
JSONObject prof = null;
try {
prof = (JSONObject) profiles.get(profileName);
}
catch (Exception e) {
//this is normal if doesnt exist.
}
java.text.DateFormat dateFormat=new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
if (prof == null) {
prof = new JSONObject();
prof.put("created", dateFormat.format(new java.util.Date()));
profiles.put(profileName, prof);
}
prof.put("lastVersionId", minecriftVer + mod);
int minAlloc = ((Integer) ramAllocation.getSelectedItem()) == 1 ? 1 : 2;
prof.put("javaArgs", "-Xmx" + ramAllocation.getSelectedItem() + "G -Xms" + minAlloc + "G -XX:+UseParallelGC -XX:ParallelGCThreads=3 -XX:MaxGCPauseMillis=3 -Xmn256M -Dfml.ignoreInvalidMinecraftCertificates=true -Dfml.ignorePatchDiscrepancies=true");
prof.put("name", profileName);
prof.put("icon", "Creeper_Head");
prof.put("type", "custom");
prof.put("lastUsed", dateFormat.format(new java.util.Date()));
if (chkCustomGameDir.isSelected() && !txtCustomGameDir.getText().trim().isEmpty()) {
String dir = txtCustomGameDir.getText();
if (dir.endsWith("\\mods")) dir = dir.substring(0, dir.length()-5);
if (dir.endsWith("\\mods\\")) dir = dir.substring(0, dir.length()-6);
prof.put("gameDir", dir); // use the trimmed path so the profile points at the pack root, not its mods folder
} else {
prof.remove("gameDir");
}
FileWriter fwJson = new FileWriter(fileJson);
fwJson.write(root.toString(jsonIndentSpaces));
fwJson.flush();
fwJson.close();
result = true;
}
catch (Exception e) {
e.printStackTrace();
}
return result;
}
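// Rewrites the MultiMC instance.cfg memory/JVM settings and registers the Vivecraft component in mmc-pack.json.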
private boolean updateMMCInst(File mcBaseDirFile, String minecriftVer)
{
boolean result = false;
try {
File cfg = new File(mcBaseDirFile, "instance.cfg");
if(!cfg.exists()) return result;
BufferedReader r = new BufferedReader(new FileReader(cfg));
java.util.List<String> lines = new ArrayList<String>();
String l;
while((l = r.readLine()) != null){
if(l.startsWith("JvmArgs"))
continue;
if(l.startsWith("MaxMemAlloc"))
continue;
if(l.startsWith("MinMemAlloc"))
continue;
if(l.startsWith("OverrideJavaArgs"))
continue;
if(l.startsWith("OverrideMemory"))
continue;
lines.add(l);
}
lines.add("MinMemAlloc=" + ((Integer)ramAllocation.getSelectedItem())*1024);
lines.add("MaxMemAlloc=" + ((Integer)ramAllocation.getSelectedItem())*1024);
lines.add("OverrideJavaArgs=true");
lines.add("OverrideMemory=true");
lines.add("JvmArgs=-XX:+UseParallelGC -XX:ParallelGCThreads=3 -XX:MaxGCPauseMillis=3 -Xmn256M -Dfml.ignoreInvalidMinecraftCertificates=true -Dfml.ignorePatchDiscrepancies=true");
r.close();
String[] arr = lines.toArray(new String[lines.size()]);
Arrays.sort(arr);
BufferedWriter w = new BufferedWriter(new FileWriter(cfg,false));
for (String string : arr) {
w.write(string);
w.newLine();
}
w.close();
File mmcpack = new File(mcBaseDirFile, "mmc-pack.json");
if(!mmcpack.exists()) return result;
String json = readAsciiFile(mmcpack);
JSONObject root = new JSONObject(json);
JSONArray components = (JSONArray)root.get("components");
JSONObject v = new JSONObject();
v.put("cachedName", "Vivecraft");
v.put("uid", "vivecraft");
components.put(v);
FileWriter fwJson = new FileWriter(mmcpack);
fwJson.write(root.toString(2));
fwJson.flush();
fwJson.close();
result = true;
}
catch (Exception e) {
JOptionPane.showMessageDialog(null,
e.toString(),"",JOptionPane.WARNING_MESSAGE);
}
return result;
}
}// End InstallTask
@Override
public void propertyChange(PropertyChangeEvent evt) {
if ("progress" == evt.getPropertyName()) {
int progress = (Integer) evt.getNewValue();
System.out.println(progress);
}
}
private static void createAndShowGUI() {
String userHomeDir = System.getProperty("user.home", ".");
String osType = System.getProperty("os.name").toLowerCase();
String mcDir = ".minecraft";
File minecraftDir;
if (osType.contains("win") && System.getenv("APPDATA") != null)
{
minecraftDir = new File(System.getenv("APPDATA"), mcDir);
}
else if (osType.contains("mac"))
{
minecraftDir = new File(new File(new File(userHomeDir, "Library"),"Application Support"),"minecraft");
}
else
{
minecraftDir = new File(userHomeDir, mcDir);
releaseNotePathAddition = "/";
}
Installer panel = new Installer(minecraftDir);
panel.run();
}
private class FileSelectAction extends AbstractAction
{
private static final long serialVersionUID = 743815386102831493L;
@Override
public void actionPerformed(ActionEvent e)
{
JFileChooser dirChooser = new JFileChooser();
dirChooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
dirChooser.setFileHidingEnabled(false);
dirChooser.ensureFileIsVisible(targetDir);
dirChooser.setSelectedFile(targetDir);
int response = dirChooser.showOpenDialog(Installer.this);
switch (response)
{
case JFileChooser.APPROVE_OPTION:
targetDir = dirChooser.getSelectedFile();
updateFilePath();
break;
default:
break;
}
}
}
private class GameDirSelectAction extends AbstractAction
{
@Override
public void actionPerformed(ActionEvent e)
{
JFileChooser dirChooser = new JFileChooser();
dirChooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
dirChooser.setFileHidingEnabled(false);
dirChooser.ensureFileIsVisible(targetDir);
dirChooser.setSelectedFile(targetDir);
int response = dirChooser.showOpenDialog(Installer.this);
switch (response)
{
case JFileChooser.APPROVE_OPTION:
txtCustomGameDir.setText(dirChooser.getSelectedFile().toString());
break;
default:
break;
}
}
}
private class updateTxtEnabled extends AbstractAction
{
@Override
public void actionPerformed(ActionEvent e)
{
txtCustomProfileName.setEditable(chkCustomProfileName.isSelected());
}
}
private class updateActionF extends AbstractAction
{
@Override
public void actionPerformed(ActionEvent e)
{
updateInstructions();
if (useForge.isSelected()) ramAllocation.setSelectedIndex(2);
else ramAllocation.setSelectedIndex(1);
updateInstructions();
}
}
private class updateActionSM extends AbstractAction
{
@Override
public void actionPerformed(ActionEvent e)
{
updateInstructions();
}
}
private class updateActionP extends AbstractAction
{
@Override
public void actionPerformed(ActionEvent e)
{
updateInstructions();
}
}
private class updateActionRam extends AbstractAction
{
@Override
public void actionPerformed(ActionEvent e)
{
updateInstructions();
}
}
private void updateInstructions(){
String out = "<html>";
if(createProfile.isSelected()){
out += "Please make sure the Minecraft Launcher is not running.";
if(chkCustomProfileName.isSelected() == false){
txtCustomProfileName.setText(getMinecraftProfileName(useForge.isSelected(), useShadersMod.isSelected()));
}
if (ramAllocation.getSelectedIndex() == 0) {
out += "<br>Vivecraft may not run well with only 1 GB of memory!";
}
}
if (useForge.isSelected()){
if(optCustomForgeVersion.isSelected())
out += "<br>Custom Forge version NOT guaranteed to work!";
}
out+="</html>";
instructions.setText(out);
ramAllocation.setEnabled(createProfile.isSelected());
txtCustomForgeVersion.setEnabled(optCustomForgeVersion.isSelected());
txtCustomForgeVersion.setVisible(useForge.isSelected());
optCustomForgeVersion.setVisible(useForge.isSelected());
this.revalidate();
}
private void updateFilePath()
{
try
{
targetDir = targetDir.getCanonicalFile();
selectedDirText.setText(targetDir.getPath());
selectedDirText.setForeground(Color.BLACK);
infoLabel.setVisible(false);
fileEntryPanel.setBorder(null);
if (dialog!=null)
{
dialog.invalidate();
dialog.pack();
}
}
catch (IOException e)
{
selectedDirText.setForeground(Color.RED);
fileEntryPanel.setBorder(new LineBorder(Color.RED));
infoLabel.setText("<html>"+"Error!"+"</html>");
infoLabel.setVisible(true);
if (dialog!=null)
{
dialog.invalidate();
dialog.pack();
}
}
if( forgeVersions == null || forgeVersions.length == 0 )
forgeVersions = new String[] { };
forgeVersion.setModel( new DefaultComboBoxModel(forgeVersions));
}
public static void main(String[] args)
{
// I'm gonna shit a JVM
System.setProperty("java.net.preferIPv4Stack" , "true");
try {
// Set System L&F
UIManager.setLookAndFeel(
UIManager.getSystemLookAndFeelClassName());
} catch (Exception e) { }
try {
javax.swing.SwingUtilities.invokeLater(new Runnable() {
public void run() {
createAndShowGUI();
}
});
} catch (Exception e) { e.printStackTrace(); }
}
public static JLabel linkify(final String text, String URL, String toolTip)
{
URI temp = null;
try
{
temp = new URI(URL);
}
catch (Exception e)
{
e.printStackTrace();
}
final URI uri = temp;
final JLabel link = new JLabel();
link.setText("<HTML><FONT color=\"#000099\">"+text+"</FONT></HTML>");
if(!toolTip.equals(""))
link.setToolTipText(toolTip);
link.setCursor(new Cursor(Cursor.HAND_CURSOR));
link.addMouseListener(new MouseListener() {
public void mouseExited(MouseEvent arg0) {
link.setText("<HTML><FONT color=\"#000099\">"+text+"</FONT></HTML>");
}
public void mouseEntered(MouseEvent arg0) {
link.setText("<HTML><FONT color=\"#000099\"><U>"+text+"</U></FONT></HTML>");
}
public void mouseClicked(MouseEvent arg0) {
if (Desktop.isDesktopSupported()) {
try {
Desktop.getDesktop().browse(uri);
} catch (Exception e) {
e.printStackTrace();
}
} else {
JOptionPane pane = new JOptionPane("Could not open link.");
JDialog dialog = pane.createDialog(new JFrame(), "");
dialog.setVisible(true);
}
}
public void mousePressed(MouseEvent e) {
}
public void mouseReleased(MouseEvent e) {
}
});
return link;
}
private String getMinecraftProfileName(boolean usingForge, boolean sm)
{
if(!usingForge) return DEFAULT_PROFILE_NAME;
else return DEFAULT_PROFILE_NAME_FORGE;
}
public static String readAsciiFile(File file)
throws IOException
{
FileInputStream fin = new FileInputStream(file);
InputStreamReader inr = new InputStreamReader(fin, "ASCII");
BufferedReader br = new BufferedReader(inr);
StringBuffer sb = new StringBuffer();
for (;;) {
String line = br.readLine();
if (line == null)
break;
sb.append(line);
sb.append("\n");
}
br.close();
inr.close();
fin.close();
return sb.toString();
}
private boolean copyInputStreamToFile( InputStream in, File file )
{
if (in == null || file == null)
return false;
boolean success = true;
try {
OutputStream out = new FileOutputStream(file);
byte[] buf = new byte[1024];
int len;
while((len=in.read(buf))>0){
out.write(buf,0,len);
}
out.close();
in.close();
} catch (Exception e) {
e.printStackTrace();
success = false;
}
return success;
}
}
|
[
"\"APPDATA\"",
"\"_JAVA_OPTIONS\"",
"\"APPDATA\"",
"\"APPDATA\""
] |
[] |
[
"APPDATA",
"_JAVA_OPTIONS"
] |
[]
|
["APPDATA", "_JAVA_OPTIONS"]
|
java
| 2 | 0 | |
download_from_google_storage.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download files from Google Storage based on SHA1 sums."""
import hashlib
import optparse
import os
import Queue
import re
import shutil
import stat
import sys
import tarfile
import threading
import time
import subprocess2
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
# Maps sys.platform to what we actually want to call them.
PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
}
class FileNotFoundError(IOError):
pass
class InvalidFileError(IOError):
pass
class InvalidPlatformError(Exception):
pass
def GetNormalizedPlatform():
"""Returns the result of sys.platform accounting for cygwin.
Under cygwin, this will always return "win32" like the native Python."""
if sys.platform == 'cygwin':
return 'win32'
return sys.platform
# Common utilities
class Gsutil(object):
"""Call gsutil with some predefined settings. This is a convenience object,
and is also immutable."""
MAX_TRIES = 5
RETRY_BASE_DELAY = 5.0
RETRY_DELAY_MULTIPLE = 1.3
def __init__(self, path, boto_path=None, timeout=None, version='4.26'):
if not os.path.exists(path):
raise FileNotFoundError('GSUtil not found in %s' % path)
self.path = path
self.timeout = timeout
self.boto_path = boto_path
self.version = version
def get_sub_env(self):
env = os.environ.copy()
if self.boto_path == os.devnull:
env['AWS_CREDENTIAL_FILE'] = ''
env['BOTO_CONFIG'] = ''
elif self.boto_path:
env['AWS_CREDENTIAL_FILE'] = self.boto_path
env['BOTO_CONFIG'] = self.boto_path
return env
def call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout)
def check_call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
((out, err), code) = subprocess2.communicate(
cmd,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE,
env=self.get_sub_env(),
timeout=self.timeout)
# Parse output.
status_code_match = re.search('status=([0-9]+)', err)
if status_code_match:
return (int(status_code_match.group(1)), out, err)
if ('You are attempting to access protected data with '
'no configured credentials.' in err):
return (403, out, err)
if 'matched no objects' in err:
return (404, out, err)
return (code, out, err)
def check_call_with_retries(self, *args):
delay = self.RETRY_BASE_DELAY
for i in xrange(self.MAX_TRIES):
code, out, err = self.check_call(*args)
if not code or i == self.MAX_TRIES - 1:
break
time.sleep(delay)
delay *= self.RETRY_DELAY_MULTIPLE
return code, out, err
def check_platform(target):
"""Checks if any parent directory of target matches (win|mac|linux)."""
assert os.path.isabs(target)
root, target_name = os.path.split(target)
if not target_name:
return None
if target_name in ('linux', 'mac', 'win'):
return target_name
return check_platform(root)
def get_sha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
# Download-specific code starts here
def enumerate_work_queue(input_filename, work_queue, directory,
recursive, ignore_errors, output, sha1_file,
auto_platform):
if sha1_file:
if not os.path.exists(input_filename):
if not ignore_errors:
raise FileNotFoundError('%s not found.' % input_filename)
print >> sys.stderr, '%s not found.' % input_filename
return 0
with open(input_filename, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
work_queue.put((sha1_match.groups(1)[0], output))
return 1
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % input_filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename
return 0
if not directory:
work_queue.put((input_filename, output))
return 1
work_queue_size = 0
for root, dirs, files in os.walk(input_filename):
if not recursive:
for item in dirs[:]:
dirs.remove(item)
else:
for exclude in ['.svn', '.git']:
if exclude in dirs:
dirs.remove(exclude)
for filename in files:
full_path = os.path.join(root, filename)
if full_path.endswith('.sha1'):
if auto_platform:
# Skip if the platform does not match.
target_platform = check_platform(os.path.abspath(full_path))
if not target_platform:
err = ('--auto_platform passed in but no platform name found in '
'the path of %s' % full_path)
if not ignore_errors:
raise InvalidFileError(err)
print >> sys.stderr, err
continue
current_platform = PLATFORM_MAPPING[sys.platform]
if current_platform != target_platform:
continue
with open(full_path, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
work_queue.put(
(sha1_match.groups(1)[0], full_path.replace('.sha1', '')))
work_queue_size += 1
else:
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % filename
return work_queue_size
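# Guard against malicious archives: reject symlinks, hard links, and entries that would escape the extraction prefix.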
def _validate_tar_file(tar, prefix):
def _validate(tarinfo):
"""Returns false if the tarinfo is something we explicitly forbid."""
if tarinfo.issym() or tarinfo.islnk():
return False
if '..' in tarinfo.name or not tarinfo.name.startswith(prefix):
return False
return True
return all(map(_validate, tar.getmembers()))
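# Worker thread: pops (sha1, output path) pairs off the queue, verifies or downloads each file from Google Storage, and optionally extracts tar.gz archives.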
def _downloader_worker_thread(thread_num, q, force, base_url,
gsutil, out_q, ret_codes, verbose, extract,
delete=True):
while True:
input_sha1_sum, output_filename = q.get()
if input_sha1_sum is None:
return
extract_dir = None
if extract:
if not output_filename.endswith('.tar.gz'):
out_q.put('%d> Error: %s is not a tar.gz archive.' % (
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
extract_dir = output_filename[0:len(output_filename)-7]
if os.path.exists(output_filename) and not force:
if not extract or os.path.exists(extract_dir):
if get_sha1(output_filename) == input_sha1_sum:
if verbose:
out_q.put(
'%d> File %s exists and SHA1 matches. Skipping.' % (
thread_num, output_filename))
continue
# Check if file exists.
file_url = '%s/%s' % (base_url, input_sha1_sum)
(code, _, err) = gsutil.check_call('ls', file_url)
if code != 0:
if code == 404:
out_q.put('%d> File %s for %s does not exist, skipping.' % (
thread_num, file_url, output_filename))
ret_codes.put((1, 'File %s for %s does not exist.' % (
file_url, output_filename)))
else:
# Other error, probably auth related (bad ~/.boto, etc).
out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % (
thread_num, file_url, output_filename, err))
ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % (
file_url, output_filename, err)))
continue
# Fetch the file.
out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
try:
if delete:
os.remove(output_filename) # Delete the file if it exists already.
except OSError:
if os.path.exists(output_filename):
out_q.put('%d> Warning: deleting %s failed.' % (
thread_num, output_filename))
code, _, err = gsutil.check_call('cp', file_url, output_filename)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
continue
remote_sha1 = get_sha1(output_filename)
if remote_sha1 != input_sha1_sum:
msg = ('%d> ERROR remote sha1 (%s) does not match expected sha1 (%s).' %
(thread_num, remote_sha1, input_sha1_sum))
out_q.put(msg)
ret_codes.put((20, msg))
continue
if extract:
if not tarfile.is_tarfile(output_filename):
out_q.put('%d> Error: %s is not a tar.gz archive.' % (
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
with tarfile.open(output_filename, 'r:gz') as tar:
dirname = os.path.dirname(os.path.abspath(output_filename))
if not _validate_tar_file(tar, os.path.basename(extract_dir)):
out_q.put('%d> Error: %s contains files outside %s.' % (
thread_num, output_filename, extract_dir))
ret_codes.put((1, '%s contains invalid entries.' % (output_filename)))
continue
if os.path.exists(extract_dir):
try:
shutil.rmtree(extract_dir)
out_q.put('%d> Removed %s...' % (thread_num, extract_dir))
except OSError:
out_q.put('%d> Warning: Can\'t delete: %s' % (
thread_num, extract_dir))
ret_codes.put((1, 'Can\'t delete %s.' % (extract_dir)))
continue
out_q.put('%d> Extracting %d entries from %s to %s' %
(thread_num, len(tar.getmembers()),output_filename,
extract_dir))
tar.extractall(path=dirname)
# Set executable bit.
if sys.platform == 'cygwin':
# Under cygwin, mark all files as executable. The executable flag in
# Google Storage will not be set when uploading from Windows, so if
# this script is running under cygwin and we're downloading an
# executable, it will be unrunnable from inside cygwin without this.
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
elif sys.platform != 'win32':
# On non-Windows platforms, key off of the custom header
# "x-goog-meta-executable".
code, out, err = gsutil.check_call('stat', file_url)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
elif re.search(r'executable:\s*1', out):
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
def printer_worker(output_queue):
while True:
line = output_queue.get()
# It's plausible we want to print empty lines.
if line is None:
break
print line
def download_from_google_storage(
input_filename, base_url, gsutil, num_threads, directory, recursive,
force, output, ignore_errors, sha1_file, verbose, auto_platform, extract):
# Start up all the worker threads.
all_threads = []
download_start = time.time()
stdout_queue = Queue.Queue()
work_queue = Queue.Queue()
ret_codes = Queue.Queue()
ret_codes.put((0, None))
for thread_num in range(num_threads):
t = threading.Thread(
target=_downloader_worker_thread,
args=[thread_num, work_queue, force, base_url,
gsutil, stdout_queue, ret_codes, verbose, extract])
t.daemon = True
t.start()
all_threads.append(t)
printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
printer_thread.daemon = True
printer_thread.start()
# Enumerate our work queue.
work_queue_size = enumerate_work_queue(
input_filename, work_queue, directory, recursive,
ignore_errors, output, sha1_file, auto_platform)
for _ in all_threads:
work_queue.put((None, None)) # Used to tell worker threads to stop.
# Wait for all downloads to finish.
for t in all_threads:
t.join()
stdout_queue.put(None)
printer_thread.join()
# See if we ran into any errors.
max_ret_code = 0
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
print >> sys.stderr, message
if verbose and not max_ret_code:
print 'Success!'
if verbose:
print 'Downloading %d files took %.1f second(s)' % (
work_queue_size, time.time() - download_start)
return max_ret_code
def main(args):
usage = ('usage: %prog [options] target\n'
'Target must be:\n'
' (default) a sha1 sum ([A-Za-z0-9]{40}).\n'
' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on '
'the first line.\n'
' (-d or --directory) A directory to scan for .sha1 files.')
parser = optparse.OptionParser(usage)
parser.add_option('-o', '--output',
help='Specify the output file name. Defaults to: '
'(a) Given a SHA1 hash, the name is the SHA1 hash. '
'(b) Given a .sha1 file or directory, the name will '
'match (.*).sha1.')
parser.add_option('-b', '--bucket',
help='Google Storage bucket to fetch from.')
parser.add_option('-e', '--boto',
help='Specify a custom boto file.')
parser.add_option('-c', '--no_resume', action='store_true',
help='DEPRECATED: Resume download if file is '
'partially downloaded.')
parser.add_option('-f', '--force', action='store_true',
help='Force download even if local file exists.')
parser.add_option('-i', '--ignore_errors', action='store_true',
help='Don\'t throw error if we find an invalid .sha1 file.')
parser.add_option('-r', '--recursive', action='store_true',
help='Scan folders recursively for .sha1 files. '
'Must be used with -d/--directory')
parser.add_option('-t', '--num_threads', default=1, type='int',
help='Number of downloader threads to run.')
parser.add_option('-d', '--directory', action='store_true',
help='The target is a directory. '
'Cannot be used with -s/--sha1_file.')
parser.add_option('-s', '--sha1_file', action='store_true',
help='The target is a file containing a sha1 sum. '
'Cannot be used with -d/--directory.')
parser.add_option('-g', '--config', action='store_true',
help='Alias for "gsutil config". Run this if you want '
'to initialize your saved Google Storage '
'credentials. This will create a read-only '
'credentials file in ~/.boto.depot_tools.')
parser.add_option('-n', '--no_auth', action='store_true',
help='Skip auth checking. Use if it\'s known that the '
'target bucket is a public bucket.')
parser.add_option('-p', '--platform',
help='A regular expression that is compared against '
'Python\'s sys.platform. If this option is specified, '
'the download will happen only if there is a match.')
parser.add_option('-a', '--auto_platform',
action='store_true',
help='Detects if any parent folder of the target matches '
'(linux|mac|win). If so, the script will only '
'process files that are in paths that '
'match the current platform.')
parser.add_option('-u', '--extract',
action='store_true',
help='Extract a downloaded tar.gz file. '
'Leaves the tar.gz file around for sha1 verification. '
'If a directory with the same name as the tar.gz '
'file already exists, it is deleted (to get a '
'clean state in case of update).')
parser.add_option('-v', '--verbose', action='store_true', default=True,
help='DEPRECATED: Defaults to True. Use --no-verbose '
'to suppress.')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='Suppresses diagnostic and progress information.')
(options, args) = parser.parse_args()
# Make sure we should run at all based on platform matching.
if options.platform:
if options.auto_platform:
parser.error('--platform can not be specified with --auto_platform')
if not re.match(options.platform, GetNormalizedPlatform()):
if options.verbose:
print('The current platform doesn\'t match "%s", skipping.' %
options.platform)
return 0
# Set the boto file to /dev/null if we don't need auth.
if options.no_auth:
if (set(('http_proxy', 'https_proxy')).intersection(
env.lower() for env in os.environ) and
'NO_AUTH_BOTO_CONFIG' not in os.environ):
print >> sys.stderr, ('NOTICE: You have PROXY values set in your '
'environment, but gsutil in depot_tools does not '
'(yet) obey them.')
print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG '
'environment variable from being used.')
print >> sys.stderr, ('To use a proxy in this situation, please supply '
'those settings in a .boto file pointed to by '
'the NO_AUTH_BOTO_CONFIG environment var.')
options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
# Make sure gsutil exists where we expect it to.
if os.path.exists(GSUTIL_DEFAULT_PATH):
gsutil = Gsutil(GSUTIL_DEFAULT_PATH,
boto_path=options.boto)
else:
parser.error('gsutil not found in %s, bad depot_tools checkout?' %
GSUTIL_DEFAULT_PATH)
# Passing in -g/--config will run our copy of GSUtil, then quit.
if options.config:
print '===Note from depot_tools==='
print 'If you do not have a project ID, enter "0" when asked for one.'
print '===End note from depot_tools==='
print
return gsutil.call('config')
if not args:
parser.error('Missing target.')
if len(args) > 1:
parser.error('Too many targets.')
if not options.bucket:
parser.error('Missing bucket. Specify bucket with --bucket.')
if options.sha1_file and options.directory:
parser.error('Both --directory and --sha1_file are specified, '
'can only specify one.')
if options.recursive and not options.directory:
parser.error('--recursive specified but --directory not specified.')
if options.output and options.directory:
parser.error('--directory is specified, so --output has no effect.')
if (not (options.sha1_file or options.directory)
and options.auto_platform):
parser.error('--auto_platform must be specified with either '
'--sha1_file or --directory')
input_filename = args[0]
# Set output filename if not specified.
if not options.output and not options.directory:
if not options.sha1_file:
# Target is a sha1 sum, so output filename would also be the sha1 sum.
options.output = input_filename
elif options.sha1_file:
# Target is a .sha1 file.
if not input_filename.endswith('.sha1'):
parser.error('--sha1_file is specified, but the input filename '
'does not end with .sha1, and no --output is specified. '
'Either make sure the input filename has a .sha1 '
'extension, or specify --output.')
options.output = input_filename[:-5]
else:
parser.error('Unreachable state.')
base_url = 'gs://%s' % options.bucket
return download_from_google_storage(
input_filename, base_url, gsutil, options.num_threads, options.directory,
options.recursive, options.force, options.output, options.ignore_errors,
options.sha1_file, options.verbose, options.auto_platform,
options.extract)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[] |
[] |
[
"NO_AUTH_BOTO_CONFIG"
] |
[]
|
["NO_AUTH_BOTO_CONFIG"]
|
python
| 1 | 0 | |
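The notices printed by the script above describe how --no_auth interacts with proxies: the normal BOTO_CONFIG lookup is bypassed and only NO_AUTH_BOTO_CONFIG (defaulting to /dev/null) is honoured, which is exactly what the options.boto assignment does. A minimal standalone sketch of that fallback, using only the standard library; resolve_boto_config is a hypothetical helper name, not part of the script above:

import os

def resolve_boto_config(no_auth: bool) -> str:
    # With --no_auth, ignore any existing boto setup and read NO_AUTH_BOTO_CONFIG,
    # falling back to os.devnull so gsutil runs without credentials.
    if no_auth:
        return os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
    # Otherwise defer to whatever boto configuration is already in place.
    return os.environ.get('BOTO_CONFIG', '')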
purnkleen/wsgi.py
|
"""
WSGI config for purnkleen project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "purnkleen.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
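Since purnkleen/wsgi.py above only exposes the module-level application callable, the quickest way to exercise it locally is to hand it to the standard library's wsgiref server. The snippet below is an illustrative sketch, not part of the project: it assumes Django is installed and that purnkleen.settings is importable (the setdefault call above supplies DJANGO_SETTINGS_MODULE).

from wsgiref.simple_server import make_server
from purnkleen.wsgi import application  # requires purnkleen.settings on PYTHONPATH

if __name__ == '__main__':
    # Serve the Django WSGI callable on localhost for a quick smoke test.
    with make_server('127.0.0.1', 8000, application) as httpd:
        httpd.serve_forever()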
src/clerk.go
|
package core
import (
"context"
"github.com/cozy-hosting/clerk"
"github.com/sirupsen/logrus"
"go.uber.org/fx"
"os"
)
var ClerkModule = fx.Provide(
NewClerkConnection,
)
func NewClerkConnection(lifecycle fx.Lifecycle, logger *logrus.Logger) clerk.Connection {
connectionString := os.Getenv("CLERK_CONNECTION_STRING")
connection, err := clerk.NewMongoConnection(connectionString)
if err != nil {
logger.Fatal(err)
}
lifecycle.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
connection.Close(func(err error) {
logger.Fatal(err)
})
return nil
},
})
logger.Info("Successfully connected to MongoDB")
return connection
}
|
[
"\"CLERK_CONNECTION_STRING\""
] |
[] |
[
"CLERK_CONNECTION_STRING"
] |
[]
|
["CLERK_CONNECTION_STRING"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"fmt"
"math/rand"
"os"
"time"
"github.com/rockneurotiko/go-tgbot"
"github.com/joho/godotenv"
)
var avaliableCommands = map[string]string{
"/start": "Go! Go! Go!",
"/help": "HALP!",
}
func helpHandler(bot tgbot.TgBot, msg tgbot.Message, text string) *string {
var buffer bytes.Buffer
var str string
for cmd, helptext := range avaliableCommands {
str = fmt.Sprintf("%s - %s\n", cmd, helptext)
buffer.WriteString(str)
}
bot.Answer(msg).Text(buffer.String()).End()
return nil
}
func echoHandler(bot tgbot.TgBot, msg tgbot.Message, vals []string, kvals map[string]string) *string {
newmsg := fmt.Sprintf("[Echoed]: %s", vals[1])
return &newmsg
}
func testeHandler(bot tgbot.TgBot, msg tgbot.Message, text string) *string {
replies := []string{
"Peste",
"Teste",
"Leste",
"Oeste",
"Veste",
}
	reply := replies[rand.Intn(len(replies))]
return &reply
}
func instagramHandler(bot tgbot.TgBot, msg tgbot.Message, text string) *string {
bot.Answer(msg).Text(">instagram").ReplyToMessage(msg.ID).End()
return nil
}
func anyHandler(bot tgbot.TgBot, msg tgbot.Message) {
rand.Seed(time.Now().Unix())
if rand.Intn(100) == 1 {
bot.Answer(msg).Text("Isso!").ReplyToMessage(msg.ID).End()
}
}
func main() {
godotenv.Load()
token := os.Getenv("TELEGRAM_KEY")
bot := tgbot.NewTgBot(token)
bot.SimpleCommandFn(`^/help`, helpHandler)
bot.CommandFn(`echo (.+)`, echoHandler)
bot.SimpleRegexFn(`^(?i)teste$`, testeHandler)
bot.SimpleRegexFn(`(?i)instagram`, instagramHandler)
bot.AnyMsgFn(anyHandler)
bot.SimpleStart()
}
|
[
"\"TELEGRAM_KEY\""
] |
[] |
[
"TELEGRAM_KEY"
] |
[]
|
["TELEGRAM_KEY"]
|
go
| 1 | 0 | |
mws/acceptance/private_access_settings_test.go
|
package acceptance
import (
"os"
"testing"
"github.com/databrickslabs/terraform-provider-databricks/internal/acceptance"
)
func TestMwsAccPrivateAccessSettings(t *testing.T) {
cloudEnv := os.Getenv("CLOUD_ENV")
if cloudEnv != "MWS" {
t.Skip("Cannot run test on non-MWS environment")
}
acceptance.Test(t, []acceptance.Step{
{
Template: `
resource "databricks_mws_private_access_settings" "this" {
account_id = "{env.DATABRICKS_ACCOUNT_ID}"
private_access_settings_name = "tf-{var.RANDOM}"
region = "{env.TEST_REGION}"
}`,
},
})
}
|
[
"\"CLOUD_ENV\""
] |
[] |
[
"CLOUD_ENV"
] |
[]
|
["CLOUD_ENV"]
|
go
| 1 | 0 | |
tests/unit/gapic/deploy_v1/test_cloud_deploy.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.deploy_v1.services.cloud_deploy import CloudDeployAsyncClient
from google.cloud.deploy_v1.services.cloud_deploy import CloudDeployClient
from google.cloud.deploy_v1.services.cloud_deploy import pagers
from google.cloud.deploy_v1.services.cloud_deploy import transports
from google.cloud.deploy_v1.types import cloud_deploy
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CloudDeployClient._get_default_mtls_endpoint(None) is None
assert (
CloudDeployClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
CloudDeployClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
CloudDeployClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
CloudDeployClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert CloudDeployClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [CloudDeployClient, CloudDeployAsyncClient,])
def test_cloud_deploy_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "clouddeploy.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.CloudDeployGrpcTransport, "grpc"),
(transports.CloudDeployGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_cloud_deploy_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [CloudDeployClient, CloudDeployAsyncClient,])
def test_cloud_deploy_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "clouddeploy.googleapis.com:443"
def test_cloud_deploy_client_get_transport_class():
transport = CloudDeployClient.get_transport_class()
available_transports = [
transports.CloudDeployGrpcTransport,
]
assert transport in available_transports
transport = CloudDeployClient.get_transport_class("grpc")
assert transport == transports.CloudDeployGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(CloudDeployClient, transports.CloudDeployGrpcTransport, "grpc"),
(
CloudDeployAsyncClient,
transports.CloudDeployGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
CloudDeployClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudDeployClient)
)
@mock.patch.object(
CloudDeployAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(CloudDeployAsyncClient),
)
def test_cloud_deploy_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(CloudDeployClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(CloudDeployClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(CloudDeployClient, transports.CloudDeployGrpcTransport, "grpc", "true"),
(
CloudDeployAsyncClient,
transports.CloudDeployGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(CloudDeployClient, transports.CloudDeployGrpcTransport, "grpc", "false"),
(
CloudDeployAsyncClient,
transports.CloudDeployGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
CloudDeployClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudDeployClient)
)
@mock.patch.object(
CloudDeployAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(CloudDeployAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cloud_deploy_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
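# --- Illustrative sketch (editor's addition, not part of the generated GAPIC tests) ---
# The tests above drive endpoint selection purely through environment variables
# patched with mock.patch.dict, which restores os.environ when the block exits.
# The same pattern isolates any env-dependent branch; _pick_endpoint_sketch is a
# hypothetical helper, not a CloudDeploy client API.
def _pick_endpoint_sketch():
    use_mtls = os.environ.get("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    return "mtls" if use_mtls == "always" else "plain"
def test_pick_endpoint_sketch_respects_env():
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        assert _pick_endpoint_sketch() == "mtls"
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        assert _pick_endpoint_sketch() == "plain"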
@pytest.mark.parametrize("client_class", [CloudDeployClient, CloudDeployAsyncClient])
@mock.patch.object(
CloudDeployClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudDeployClient)
)
@mock.patch.object(
CloudDeployAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(CloudDeployAsyncClient),
)
def test_cloud_deploy_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(CloudDeployClient, transports.CloudDeployGrpcTransport, "grpc"),
(
CloudDeployAsyncClient,
transports.CloudDeployGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_cloud_deploy_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(CloudDeployClient, transports.CloudDeployGrpcTransport, "grpc"),
(
CloudDeployAsyncClient,
transports.CloudDeployGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_cloud_deploy_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_cloud_deploy_client_client_options_from_dict():
with mock.patch(
"google.cloud.deploy_v1.services.cloud_deploy.transports.CloudDeployGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = CloudDeployClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [cloud_deploy.ListDeliveryPipelinesRequest, dict,]
)
def test_list_delivery_pipelines(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListDeliveryPipelinesResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_delivery_pipelines(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListDeliveryPipelinesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDeliveryPipelinesPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_delivery_pipelines_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
client.list_delivery_pipelines()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListDeliveryPipelinesRequest()
@pytest.mark.asyncio
async def test_list_delivery_pipelines_async(
transport: str = "grpc_asyncio",
request_type=cloud_deploy.ListDeliveryPipelinesRequest,
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListDeliveryPipelinesResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_delivery_pipelines(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListDeliveryPipelinesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDeliveryPipelinesAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_delivery_pipelines_async_from_dict():
await test_list_delivery_pipelines_async(request_type=dict)
def test_list_delivery_pipelines_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListDeliveryPipelinesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
call.return_value = cloud_deploy.ListDeliveryPipelinesResponse()
client.list_delivery_pipelines(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_delivery_pipelines_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListDeliveryPipelinesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListDeliveryPipelinesResponse()
)
await client.list_delivery_pipelines(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_delivery_pipelines_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListDeliveryPipelinesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_delivery_pipelines(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_delivery_pipelines_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_delivery_pipelines(
cloud_deploy.ListDeliveryPipelinesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_delivery_pipelines_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListDeliveryPipelinesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListDeliveryPipelinesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_delivery_pipelines(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_delivery_pipelines_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_delivery_pipelines(
cloud_deploy.ListDeliveryPipelinesRequest(), parent="parent_value",
)
def test_list_delivery_pipelines_pager(transport_name: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
next_page_token="abc",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[], next_page_token="def",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[cloud_deploy.DeliveryPipeline(),],
next_page_token="ghi",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_delivery_pipelines(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, cloud_deploy.DeliveryPipeline) for i in results)
def test_list_delivery_pipelines_pages(transport_name: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
next_page_token="abc",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[], next_page_token="def",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[cloud_deploy.DeliveryPipeline(),],
next_page_token="ghi",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
),
RuntimeError,
)
pages = list(client.list_delivery_pipelines(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_delivery_pipelines_async_pager():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
next_page_token="abc",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[], next_page_token="def",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[cloud_deploy.DeliveryPipeline(),],
next_page_token="ghi",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
),
RuntimeError,
)
async_pager = await client.list_delivery_pipelines(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloud_deploy.DeliveryPipeline) for i in responses)
@pytest.mark.asyncio
async def test_list_delivery_pipelines_async_pages():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_delivery_pipelines),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
next_page_token="abc",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[], next_page_token="def",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[cloud_deploy.DeliveryPipeline(),],
next_page_token="ghi",
),
cloud_deploy.ListDeliveryPipelinesResponse(
delivery_pipelines=[
cloud_deploy.DeliveryPipeline(),
cloud_deploy.DeliveryPipeline(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_delivery_pipelines(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [cloud_deploy.GetDeliveryPipelineRequest, dict,]
)
def test_get_delivery_pipeline(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.DeliveryPipeline(
name="name_value",
uid="uid_value",
description="description_value",
etag="etag_value",
serial_pipeline=cloud_deploy.SerialPipeline(
stages=[cloud_deploy.Stage(target_id="target_id_value")]
),
)
response = client.get_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.DeliveryPipeline)
assert response.name == "name_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_get_delivery_pipeline_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
client.get_delivery_pipeline()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetDeliveryPipelineRequest()
@pytest.mark.asyncio
async def test_get_delivery_pipeline_async(
transport: str = "grpc_asyncio",
request_type=cloud_deploy.GetDeliveryPipelineRequest,
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.DeliveryPipeline(
name="name_value",
uid="uid_value",
description="description_value",
etag="etag_value",
)
)
response = await client.get_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.DeliveryPipeline)
assert response.name == "name_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_delivery_pipeline_async_from_dict():
await test_get_delivery_pipeline_async(request_type=dict)
def test_get_delivery_pipeline_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetDeliveryPipelineRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
call.return_value = cloud_deploy.DeliveryPipeline()
client.get_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_delivery_pipeline_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetDeliveryPipelineRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.DeliveryPipeline()
)
await client.get_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_delivery_pipeline_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.DeliveryPipeline()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_delivery_pipeline(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_delivery_pipeline_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_delivery_pipeline(
cloud_deploy.GetDeliveryPipelineRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_delivery_pipeline_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.DeliveryPipeline()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.DeliveryPipeline()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_delivery_pipeline(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_delivery_pipeline_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_delivery_pipeline(
cloud_deploy.GetDeliveryPipelineRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [cloud_deploy.CreateDeliveryPipelineRequest, dict,]
)
def test_create_delivery_pipeline(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_delivery_pipeline_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
client.create_delivery_pipeline()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateDeliveryPipelineRequest()
@pytest.mark.asyncio
async def test_create_delivery_pipeline_async(
transport: str = "grpc_asyncio",
request_type=cloud_deploy.CreateDeliveryPipelineRequest,
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_delivery_pipeline_async_from_dict():
await test_create_delivery_pipeline_async(request_type=dict)
def test_create_delivery_pipeline_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateDeliveryPipelineRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_delivery_pipeline_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateDeliveryPipelineRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_delivery_pipeline_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_delivery_pipeline(
parent="parent_value",
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
delivery_pipeline_id="delivery_pipeline_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].delivery_pipeline
mock_val = cloud_deploy.DeliveryPipeline(name="name_value")
assert arg == mock_val
arg = args[0].delivery_pipeline_id
mock_val = "delivery_pipeline_id_value"
assert arg == mock_val
def test_create_delivery_pipeline_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_delivery_pipeline(
cloud_deploy.CreateDeliveryPipelineRequest(),
parent="parent_value",
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
delivery_pipeline_id="delivery_pipeline_id_value",
)
@pytest.mark.asyncio
async def test_create_delivery_pipeline_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_delivery_pipeline(
parent="parent_value",
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
delivery_pipeline_id="delivery_pipeline_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].delivery_pipeline
mock_val = cloud_deploy.DeliveryPipeline(name="name_value")
assert arg == mock_val
arg = args[0].delivery_pipeline_id
mock_val = "delivery_pipeline_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_delivery_pipeline_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_delivery_pipeline(
cloud_deploy.CreateDeliveryPipelineRequest(),
parent="parent_value",
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
delivery_pipeline_id="delivery_pipeline_id_value",
)
@pytest.mark.parametrize(
"request_type", [cloud_deploy.UpdateDeliveryPipelineRequest, dict,]
)
def test_update_delivery_pipeline(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.UpdateDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_delivery_pipeline_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
client.update_delivery_pipeline()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.UpdateDeliveryPipelineRequest()
@pytest.mark.asyncio
async def test_update_delivery_pipeline_async(
transport: str = "grpc_asyncio",
request_type=cloud_deploy.UpdateDeliveryPipelineRequest,
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.UpdateDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_delivery_pipeline_async_from_dict():
await test_update_delivery_pipeline_async(request_type=dict)
def test_update_delivery_pipeline_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.UpdateDeliveryPipelineRequest()
request.delivery_pipeline.name = "delivery_pipeline.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"delivery_pipeline.name=delivery_pipeline.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_delivery_pipeline_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.UpdateDeliveryPipelineRequest()
request.delivery_pipeline.name = "delivery_pipeline.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"delivery_pipeline.name=delivery_pipeline.name/value",
) in kw["metadata"]
def test_update_delivery_pipeline_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_delivery_pipeline(
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].delivery_pipeline
mock_val = cloud_deploy.DeliveryPipeline(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_delivery_pipeline_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_delivery_pipeline(
cloud_deploy.UpdateDeliveryPipelineRequest(),
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_delivery_pipeline_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_delivery_pipeline(
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].delivery_pipeline
mock_val = cloud_deploy.DeliveryPipeline(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_delivery_pipeline_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_delivery_pipeline(
cloud_deploy.UpdateDeliveryPipelineRequest(),
delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
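# ------------------------------------------------------------------------------
# Note (added commentary): the "flattened" tests above exercise the convenience
# signature where individual fields are passed as keyword arguments instead of a
# request object; supplying both at once is rejected with ValueError. A hedged
# sketch of the two accepted calling styles follows. ``client`` is assumed to be
# a CloudDeployClient whose transport is mocked, exactly as in the tests above;
# this helper is illustrative and never collected by pytest.
def _sketch_flattened_vs_request_object(client):
    # Request-object style:
    client.update_delivery_pipeline(cloud_deploy.UpdateDeliveryPipelineRequest())
    # Flattened-keyword style:
    client.update_delivery_pipeline(
        delivery_pipeline=cloud_deploy.DeliveryPipeline(name="name_value"),
        update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
    )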
@pytest.mark.parametrize(
"request_type", [cloud_deploy.DeleteDeliveryPipelineRequest, dict,]
)
def test_delete_delivery_pipeline(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.DeleteDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
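# ------------------------------------------------------------------------------
# Note (added commentary): delete, like create and update, is a long-running
# operation. The mocked stub returns an ``operations_pb2.Operation`` and the
# client wraps it, which is why the response is asserted to be a
# ``future.Future``. In real use a caller would typically block on completion,
# e.g. ``operation.result(timeout=...)``; that path is not exercised here
# because the operations client behind the future is mocked out.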
def test_delete_delivery_pipeline_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
client.delete_delivery_pipeline()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.DeleteDeliveryPipelineRequest()
@pytest.mark.asyncio
async def test_delete_delivery_pipeline_async(
transport: str = "grpc_asyncio",
request_type=cloud_deploy.DeleteDeliveryPipelineRequest,
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.DeleteDeliveryPipelineRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_delivery_pipeline_async_from_dict():
await test_delete_delivery_pipeline_async(request_type=dict)
def test_delete_delivery_pipeline_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.DeleteDeliveryPipelineRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_delivery_pipeline_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.DeleteDeliveryPipelineRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_delivery_pipeline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_delivery_pipeline_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_delivery_pipeline(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_delivery_pipeline_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_delivery_pipeline(
cloud_deploy.DeleteDeliveryPipelineRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_delivery_pipeline_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_delivery_pipeline), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_delivery_pipeline(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_delivery_pipeline_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_delivery_pipeline(
cloud_deploy.DeleteDeliveryPipelineRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.ListTargetsRequest, dict,])
def test_list_targets(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListTargetsResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_targets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListTargetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTargetsPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
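# ------------------------------------------------------------------------------
# Note (added commentary): although ``list_targets`` returns a
# ``pagers.ListTargetsPager`` rather than the raw response, the attribute
# assertions above (``next_page_token``, ``unreachable``) still succeed because
# the pager forwards unknown attribute lookups to the wrapped
# ``ListTargetsResponse`` it was constructed with.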
def test_list_targets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
client.list_targets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListTargetsRequest()
@pytest.mark.asyncio
async def test_list_targets_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.ListTargetsRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListTargetsResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_targets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListTargetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTargetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_targets_async_from_dict():
await test_list_targets_async(request_type=dict)
def test_list_targets_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListTargetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
call.return_value = cloud_deploy.ListTargetsResponse()
client.list_targets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_targets_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListTargetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListTargetsResponse()
)
await client.list_targets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_targets_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListTargetsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_targets(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_targets_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_targets(
cloud_deploy.ListTargetsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_targets_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_deploy.ListTargetsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_targets(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_targets_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_targets(
cloud_deploy.ListTargetsRequest(), parent="parent_value",
)
def test_list_targets_pager(transport_name: str = "grpc"):
client = CloudDeployClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListTargetsResponse(
targets=[
cloud_deploy.Target(),
cloud_deploy.Target(),
cloud_deploy.Target(),
],
next_page_token="abc",
),
cloud_deploy.ListTargetsResponse(targets=[], next_page_token="def",),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(),], next_page_token="ghi",
),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(), cloud_deploy.Target(),],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_targets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, cloud_deploy.Target) for i in results)
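# ------------------------------------------------------------------------------
# Note (added commentary): the pager test above drives pagination entirely from
# ``call.side_effect``: each underlying RPC pops the next canned response, the
# non-empty ``next_page_token`` values chain the pages, and the trailing
# RuntimeError only guards against an unexpected extra fetch. A minimal sketch
# of walking pages by hand, assuming the same mocked ``client``; illustrative
# only and never collected by pytest.
def _sketch_manual_page_walk(client):
    all_targets = []
    for page in client.list_targets(request={}).pages:
        # ``page.raw_page.next_page_token`` is what test_list_targets_pages asserts on.
        all_targets.extend(page.targets)
    return all_targets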
def test_list_targets_pages(transport_name: str = "grpc"):
client = CloudDeployClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_targets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListTargetsResponse(
targets=[
cloud_deploy.Target(),
cloud_deploy.Target(),
cloud_deploy.Target(),
],
next_page_token="abc",
),
cloud_deploy.ListTargetsResponse(targets=[], next_page_token="def",),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(),], next_page_token="ghi",
),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(), cloud_deploy.Target(),],
),
RuntimeError,
)
pages = list(client.list_targets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_targets_async_pager():
    client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_targets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListTargetsResponse(
targets=[
cloud_deploy.Target(),
cloud_deploy.Target(),
cloud_deploy.Target(),
],
next_page_token="abc",
),
cloud_deploy.ListTargetsResponse(targets=[], next_page_token="def",),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(),], next_page_token="ghi",
),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(), cloud_deploy.Target(),],
),
RuntimeError,
)
async_pager = await client.list_targets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloud_deploy.Target) for i in responses)
@pytest.mark.asyncio
async def test_list_targets_async_pages():
    client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_targets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListTargetsResponse(
targets=[
cloud_deploy.Target(),
cloud_deploy.Target(),
cloud_deploy.Target(),
],
next_page_token="abc",
),
cloud_deploy.ListTargetsResponse(targets=[], next_page_token="def",),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(),], next_page_token="ghi",
),
cloud_deploy.ListTargetsResponse(
targets=[cloud_deploy.Target(), cloud_deploy.Target(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_targets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
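# ------------------------------------------------------------------------------
# Note (added commentary): the async variants mirror the sync pager tests, but
# the pager itself must first be awaited and is then consumed with ``async for``;
# its ``.pages`` attribute is likewise an async iterator. A hedged sketch,
# assuming the same mocked async ``client`` as above; illustrative only and
# never collected by pytest.
async def _sketch_async_page_walk(client):
    all_targets = []
    async for page in (await client.list_targets(request={})).pages:
        all_targets.extend(page.targets)
    return all_targets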
@pytest.mark.parametrize("request_type", [cloud_deploy.GetTargetRequest, dict,])
def test_get_target(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Target(
name="name_value",
target_id="target_id_value",
uid="uid_value",
description="description_value",
require_approval=True,
etag="etag_value",
gke=cloud_deploy.GkeCluster(cluster="cluster_value"),
)
response = client.get_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Target)
assert response.name == "name_value"
assert response.target_id == "target_id_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.require_approval is True
assert response.etag == "etag_value"
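# ------------------------------------------------------------------------------
# Note (added commentary): the mock above also sets the ``gke`` oneof field
# (``cloud_deploy.GkeCluster(cluster="cluster_value")``), but only the scalar
# fields are asserted on; the oneof is populated just so the response carries
# one of the target's deployment variants, and it is omitted again in the async
# mock below.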
def test_get_target_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
client.get_target()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetTargetRequest()
@pytest.mark.asyncio
async def test_get_target_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.GetTargetRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Target(
name="name_value",
target_id="target_id_value",
uid="uid_value",
description="description_value",
require_approval=True,
etag="etag_value",
)
)
response = await client.get_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Target)
assert response.name == "name_value"
assert response.target_id == "target_id_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.require_approval is True
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_target_async_from_dict():
await test_get_target_async(request_type=dict)
def test_get_target_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetTargetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
call.return_value = cloud_deploy.Target()
client.get_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_target_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetTargetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_deploy.Target())
await client.get_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_target_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Target()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_target(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_target_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_target(
cloud_deploy.GetTargetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_target_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_target), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_deploy.Target())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_target(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_target_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_target(
cloud_deploy.GetTargetRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.CreateTargetRequest, dict,])
def test_create_target(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_target_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
client.create_target()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateTargetRequest()
@pytest.mark.asyncio
async def test_create_target_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.CreateTargetRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_target_async_from_dict():
await test_create_target_async(request_type=dict)
def test_create_target_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateTargetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_target_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateTargetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_target_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_target(
parent="parent_value",
target=cloud_deploy.Target(name="name_value"),
target_id="target_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].target
mock_val = cloud_deploy.Target(name="name_value")
assert arg == mock_val
arg = args[0].target_id
mock_val = "target_id_value"
assert arg == mock_val
def test_create_target_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_target(
cloud_deploy.CreateTargetRequest(),
parent="parent_value",
target=cloud_deploy.Target(name="name_value"),
target_id="target_id_value",
)
@pytest.mark.asyncio
async def test_create_target_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_target), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_target(
parent="parent_value",
target=cloud_deploy.Target(name="name_value"),
target_id="target_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].target
mock_val = cloud_deploy.Target(name="name_value")
assert arg == mock_val
arg = args[0].target_id
mock_val = "target_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_target_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_target(
cloud_deploy.CreateTargetRequest(),
parent="parent_value",
target=cloud_deploy.Target(name="name_value"),
target_id="target_id_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.UpdateTargetRequest, dict,])
def test_update_target(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.UpdateTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_target_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
client.update_target()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.UpdateTargetRequest()
@pytest.mark.asyncio
async def test_update_target_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.UpdateTargetRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.UpdateTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_target_async_from_dict():
await test_update_target_async(request_type=dict)
def test_update_target_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.UpdateTargetRequest()
request.target.name = "target.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "target.name=target.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_target_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.UpdateTargetRequest()
request.target.name = "target.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "target.name=target.name/value",) in kw["metadata"]
def test_update_target_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_target(
target=cloud_deploy.Target(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].target
mock_val = cloud_deploy.Target(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_target_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_target(
cloud_deploy.UpdateTargetRequest(),
target=cloud_deploy.Target(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_target_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_target), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_target(
target=cloud_deploy.Target(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].target
mock_val = cloud_deploy.Target(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_target_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_target(
cloud_deploy.UpdateTargetRequest(),
target=cloud_deploy.Target(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [cloud_deploy.DeleteTargetRequest, dict,])
def test_delete_target(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.DeleteTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_target_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
client.delete_target()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.DeleteTargetRequest()
@pytest.mark.asyncio
async def test_delete_target_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.DeleteTargetRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.DeleteTargetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_target_async_from_dict():
await test_delete_target_async(request_type=dict)
def test_delete_target_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.DeleteTargetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_target_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.DeleteTargetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_target(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_target_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_target(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_target_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_target(
cloud_deploy.DeleteTargetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_target_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_target), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_target(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_target_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_target(
cloud_deploy.DeleteTargetRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.ListReleasesRequest, dict,])
def test_list_releases(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListReleasesResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_releases(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListReleasesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListReleasesPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_releases_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
client.list_releases()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListReleasesRequest()
@pytest.mark.asyncio
async def test_list_releases_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.ListReleasesRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListReleasesResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_releases(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListReleasesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListReleasesAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_releases_async_from_dict():
await test_list_releases_async(request_type=dict)
def test_list_releases_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListReleasesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
call.return_value = cloud_deploy.ListReleasesResponse()
client.list_releases(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_releases_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListReleasesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListReleasesResponse()
)
await client.list_releases(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_releases_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListReleasesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_releases(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_releases_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_releases(
cloud_deploy.ListReleasesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_releases_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_deploy.ListReleasesResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_releases(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_releases_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_releases(
cloud_deploy.ListReleasesRequest(), parent="parent_value",
)
def test_list_releases_pager(transport_name: str = "grpc"):
client = CloudDeployClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListReleasesResponse(
releases=[
cloud_deploy.Release(),
cloud_deploy.Release(),
cloud_deploy.Release(),
],
next_page_token="abc",
),
cloud_deploy.ListReleasesResponse(releases=[], next_page_token="def",),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(),], next_page_token="ghi",
),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(), cloud_deploy.Release(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
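        # The pager stores this routing metadata and re-sends it on every
        # follow-up ListReleases RPC it issues while iterating through pages.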
pager = client.list_releases(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, cloud_deploy.Release) for i in results)
def test_list_releases_pages(transport_name: str = "grpc"):
client = CloudDeployClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_releases), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListReleasesResponse(
releases=[
cloud_deploy.Release(),
cloud_deploy.Release(),
cloud_deploy.Release(),
],
next_page_token="abc",
),
cloud_deploy.ListReleasesResponse(releases=[], next_page_token="def",),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(),], next_page_token="ghi",
),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(), cloud_deploy.Release(),],
),
RuntimeError,
)
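        # `.pages` yields the raw ListReleasesResponse pages in the same order as
        # the side_effect sequence; iteration stops at the tokenless last page,
        # so the RuntimeError sentinel is never reached.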
pages = list(client.list_releases(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_releases_async_pager():
    client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_releases), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListReleasesResponse(
releases=[
cloud_deploy.Release(),
cloud_deploy.Release(),
cloud_deploy.Release(),
],
next_page_token="abc",
),
cloud_deploy.ListReleasesResponse(releases=[], next_page_token="def",),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(),], next_page_token="ghi",
),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(), cloud_deploy.Release(),],
),
RuntimeError,
)
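        # The async client returns an awaitable that resolves to an async pager,
        # whose items are then consumed with `async for`.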
async_pager = await client.list_releases(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloud_deploy.Release) for i in responses)
@pytest.mark.asyncio
async def test_list_releases_async_pages():
    client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_releases), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListReleasesResponse(
releases=[
cloud_deploy.Release(),
cloud_deploy.Release(),
cloud_deploy.Release(),
],
next_page_token="abc",
),
cloud_deploy.ListReleasesResponse(releases=[], next_page_token="def",),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(),], next_page_token="ghi",
),
cloud_deploy.ListReleasesResponse(
releases=[cloud_deploy.Release(), cloud_deploy.Release(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_releases(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [cloud_deploy.GetReleaseRequest, dict,])
def test_get_release(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Release(
name="name_value",
uid="uid_value",
description="description_value",
skaffold_config_uri="skaffold_config_uri_value",
skaffold_config_path="skaffold_config_path_value",
render_state=cloud_deploy.Release.RenderState.SUCCEEDED,
etag="etag_value",
skaffold_version="skaffold_version_value",
)
response = client.get_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetReleaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Release)
assert response.name == "name_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.skaffold_config_uri == "skaffold_config_uri_value"
assert response.skaffold_config_path == "skaffold_config_path_value"
assert response.render_state == cloud_deploy.Release.RenderState.SUCCEEDED
assert response.etag == "etag_value"
assert response.skaffold_version == "skaffold_version_value"
def test_get_release_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
client.get_release()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetReleaseRequest()
@pytest.mark.asyncio
async def test_get_release_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.GetReleaseRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Release(
name="name_value",
uid="uid_value",
description="description_value",
skaffold_config_uri="skaffold_config_uri_value",
skaffold_config_path="skaffold_config_path_value",
render_state=cloud_deploy.Release.RenderState.SUCCEEDED,
etag="etag_value",
skaffold_version="skaffold_version_value",
)
)
response = await client.get_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetReleaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Release)
assert response.name == "name_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.skaffold_config_uri == "skaffold_config_uri_value"
assert response.skaffold_config_path == "skaffold_config_path_value"
assert response.render_state == cloud_deploy.Release.RenderState.SUCCEEDED
assert response.etag == "etag_value"
assert response.skaffold_version == "skaffold_version_value"
@pytest.mark.asyncio
async def test_get_release_async_from_dict():
await test_get_release_async(request_type=dict)
def test_get_release_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetReleaseRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
call.return_value = cloud_deploy.Release()
client.get_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_release_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetReleaseRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Release()
)
await client.get_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_release_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Release()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_release(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_release_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_release(
cloud_deploy.GetReleaseRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_release_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Release()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_release(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_release_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_release(
cloud_deploy.GetReleaseRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.CreateReleaseRequest, dict,])
def test_create_release(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateReleaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_release_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
client.create_release()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateReleaseRequest()
@pytest.mark.asyncio
async def test_create_release_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.CreateReleaseRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateReleaseRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_release_async_from_dict():
await test_create_release_async(request_type=dict)
def test_create_release_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateReleaseRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_release_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateReleaseRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_release(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_release_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_release(
parent="parent_value",
release=cloud_deploy.Release(name="name_value"),
release_id="release_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].release
mock_val = cloud_deploy.Release(name="name_value")
assert arg == mock_val
arg = args[0].release_id
mock_val = "release_id_value"
assert arg == mock_val
def test_create_release_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_release(
cloud_deploy.CreateReleaseRequest(),
parent="parent_value",
release=cloud_deploy.Release(name="name_value"),
release_id="release_id_value",
)
@pytest.mark.asyncio
async def test_create_release_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_release), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_release(
parent="parent_value",
release=cloud_deploy.Release(name="name_value"),
release_id="release_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].release
mock_val = cloud_deploy.Release(name="name_value")
assert arg == mock_val
arg = args[0].release_id
mock_val = "release_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_release_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_release(
cloud_deploy.CreateReleaseRequest(),
parent="parent_value",
release=cloud_deploy.Release(name="name_value"),
release_id="release_id_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.ApproveRolloutRequest, dict,])
def test_approve_rollout(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ApproveRolloutResponse()
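        # ApproveRollout is a plain unary call, so the response is returned
        # directly rather than wrapped in a long-running operation future.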
response = client.approve_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ApproveRolloutRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.ApproveRolloutResponse)
def test_approve_rollout_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
client.approve_rollout()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ApproveRolloutRequest()
@pytest.mark.asyncio
async def test_approve_rollout_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.ApproveRolloutRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ApproveRolloutResponse()
)
response = await client.approve_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ApproveRolloutRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.ApproveRolloutResponse)
@pytest.mark.asyncio
async def test_approve_rollout_async_from_dict():
await test_approve_rollout_async(request_type=dict)
def test_approve_rollout_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ApproveRolloutRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
call.return_value = cloud_deploy.ApproveRolloutResponse()
client.approve_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_approve_rollout_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ApproveRolloutRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ApproveRolloutResponse()
)
await client.approve_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_approve_rollout_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ApproveRolloutResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.approve_rollout(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_approve_rollout_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.approve_rollout(
cloud_deploy.ApproveRolloutRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_approve_rollout_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.approve_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ApproveRolloutResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.approve_rollout(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_approve_rollout_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.approve_rollout(
cloud_deploy.ApproveRolloutRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.ListRolloutsRequest, dict,])
def test_list_rollouts(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListRolloutsResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_rollouts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListRolloutsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListRolloutsPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_rollouts_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
client.list_rollouts()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListRolloutsRequest()
@pytest.mark.asyncio
async def test_list_rollouts_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.ListRolloutsRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListRolloutsResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_rollouts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.ListRolloutsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListRolloutsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_rollouts_async_from_dict():
await test_list_rollouts_async(request_type=dict)
def test_list_rollouts_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListRolloutsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
call.return_value = cloud_deploy.ListRolloutsResponse()
client.list_rollouts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_rollouts_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.ListRolloutsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListRolloutsResponse()
)
await client.list_rollouts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_rollouts_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.ListRolloutsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_rollouts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_rollouts_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_rollouts(
cloud_deploy.ListRolloutsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_rollouts_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.ListRolloutsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_rollouts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_rollouts_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_rollouts(
cloud_deploy.ListRolloutsRequest(), parent="parent_value",
)
def test_list_rollouts_pager(transport_name: str = "grpc"):
client = CloudDeployClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListRolloutsResponse(
rollouts=[
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
],
next_page_token="abc",
),
cloud_deploy.ListRolloutsResponse(rollouts=[], next_page_token="def",),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(),], next_page_token="ghi",
),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(), cloud_deploy.Rollout(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_rollouts(request={})
assert pager._metadata == metadata
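        # Iterating the pager walks all mocked pages transparently and yields
        # the individual Rollout messages (3 + 0 + 1 + 2 = 6 in total).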
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, cloud_deploy.Rollout) for i in results)
def test_list_rollouts_pages(transport_name: str = "grpc"):
client = CloudDeployClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_rollouts), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListRolloutsResponse(
rollouts=[
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
],
next_page_token="abc",
),
cloud_deploy.ListRolloutsResponse(rollouts=[], next_page_token="def",),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(),], next_page_token="ghi",
),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(), cloud_deploy.Rollout(),],
),
RuntimeError,
)
pages = list(client.list_rollouts(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_rollouts_async_pager():
    client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_rollouts), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListRolloutsResponse(
rollouts=[
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
],
next_page_token="abc",
),
cloud_deploy.ListRolloutsResponse(rollouts=[], next_page_token="def",),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(),], next_page_token="ghi",
),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(), cloud_deploy.Rollout(),],
),
RuntimeError,
)
async_pager = await client.list_rollouts(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloud_deploy.Rollout) for i in responses)
@pytest.mark.asyncio
async def test_list_rollouts_async_pages():
    client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_rollouts), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
cloud_deploy.ListRolloutsResponse(
rollouts=[
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
cloud_deploy.Rollout(),
],
next_page_token="abc",
),
cloud_deploy.ListRolloutsResponse(rollouts=[], next_page_token="def",),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(),], next_page_token="ghi",
),
cloud_deploy.ListRolloutsResponse(
rollouts=[cloud_deploy.Rollout(), cloud_deploy.Rollout(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_rollouts(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [cloud_deploy.GetRolloutRequest, dict,])
def test_get_rollout(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Rollout(
name="name_value",
uid="uid_value",
description="description_value",
target_id="target_id_value",
approval_state=cloud_deploy.Rollout.ApprovalState.NEEDS_APPROVAL,
state=cloud_deploy.Rollout.State.SUCCEEDED,
failure_reason="failure_reason_value",
deploying_build="deploying_build_value",
etag="etag_value",
)
response = client.get_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetRolloutRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Rollout)
assert response.name == "name_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.target_id == "target_id_value"
assert response.approval_state == cloud_deploy.Rollout.ApprovalState.NEEDS_APPROVAL
assert response.state == cloud_deploy.Rollout.State.SUCCEEDED
assert response.failure_reason == "failure_reason_value"
assert response.deploying_build == "deploying_build_value"
assert response.etag == "etag_value"
def test_get_rollout_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
client.get_rollout()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetRolloutRequest()
@pytest.mark.asyncio
async def test_get_rollout_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.GetRolloutRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Rollout(
name="name_value",
uid="uid_value",
description="description_value",
target_id="target_id_value",
approval_state=cloud_deploy.Rollout.ApprovalState.NEEDS_APPROVAL,
state=cloud_deploy.Rollout.State.SUCCEEDED,
failure_reason="failure_reason_value",
deploying_build="deploying_build_value",
etag="etag_value",
)
)
response = await client.get_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetRolloutRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Rollout)
assert response.name == "name_value"
assert response.uid == "uid_value"
assert response.description == "description_value"
assert response.target_id == "target_id_value"
assert response.approval_state == cloud_deploy.Rollout.ApprovalState.NEEDS_APPROVAL
assert response.state == cloud_deploy.Rollout.State.SUCCEEDED
assert response.failure_reason == "failure_reason_value"
assert response.deploying_build == "deploying_build_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_rollout_async_from_dict():
await test_get_rollout_async(request_type=dict)
def test_get_rollout_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetRolloutRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
call.return_value = cloud_deploy.Rollout()
client.get_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_rollout_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetRolloutRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Rollout()
)
await client.get_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_rollout_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Rollout()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_rollout(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_rollout_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_rollout(
cloud_deploy.GetRolloutRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_rollout_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Rollout()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_rollout(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_rollout_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_rollout(
cloud_deploy.GetRolloutRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.CreateRolloutRequest, dict,])
def test_create_rollout(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateRolloutRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_rollout_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
client.create_rollout()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateRolloutRequest()
@pytest.mark.asyncio
async def test_create_rollout_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.CreateRolloutRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.CreateRolloutRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_rollout_async_from_dict():
await test_create_rollout_async(request_type=dict)
def test_create_rollout_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateRolloutRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_rollout_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.CreateRolloutRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_rollout(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_rollout_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_rollout(
parent="parent_value",
rollout=cloud_deploy.Rollout(name="name_value"),
rollout_id="rollout_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].rollout
mock_val = cloud_deploy.Rollout(name="name_value")
assert arg == mock_val
arg = args[0].rollout_id
mock_val = "rollout_id_value"
assert arg == mock_val
def test_create_rollout_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_rollout(
cloud_deploy.CreateRolloutRequest(),
parent="parent_value",
rollout=cloud_deploy.Rollout(name="name_value"),
rollout_id="rollout_id_value",
)
@pytest.mark.asyncio
async def test_create_rollout_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_rollout), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_rollout(
parent="parent_value",
rollout=cloud_deploy.Rollout(name="name_value"),
rollout_id="rollout_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].rollout
mock_val = cloud_deploy.Rollout(name="name_value")
assert arg == mock_val
arg = args[0].rollout_id
mock_val = "rollout_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_rollout_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_rollout(
cloud_deploy.CreateRolloutRequest(),
parent="parent_value",
rollout=cloud_deploy.Rollout(name="name_value"),
rollout_id="rollout_id_value",
)
@pytest.mark.parametrize("request_type", [cloud_deploy.GetConfigRequest, dict,])
def test_get_config(request_type, transport: str = "grpc"):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Config(
name="name_value",
default_skaffold_version="default_skaffold_version_value",
)
response = client.get_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Config)
assert response.name == "name_value"
assert response.default_skaffold_version == "default_skaffold_version_value"
def test_get_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
client.get_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetConfigRequest()
@pytest.mark.asyncio
async def test_get_config_async(
transport: str = "grpc_asyncio", request_type=cloud_deploy.GetConfigRequest
):
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cloud_deploy.Config(
name="name_value",
default_skaffold_version="default_skaffold_version_value",
)
)
response = await client.get_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_deploy.GetConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_deploy.Config)
assert response.name == "name_value"
assert response.default_skaffold_version == "default_skaffold_version_value"
@pytest.mark.asyncio
async def test_get_config_async_from_dict():
await test_get_config_async(request_type=dict)
def test_get_config_field_headers():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
call.return_value = cloud_deploy.Config()
client.get_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_config_field_headers_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_deploy.GetConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_deploy.Config())
await client.get_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_config_flattened():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Config()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_config_flattened_error():
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_config(
cloud_deploy.GetConfigRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_config_flattened_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_deploy.Config()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_deploy.Config())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_config_flattened_error_async():
client = CloudDeployAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_config(
cloud_deploy.GetConfigRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CloudDeployGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.CloudDeployGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudDeployClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.CloudDeployGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = CloudDeployClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = CloudDeployClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.CloudDeployGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudDeployClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CloudDeployGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CloudDeployClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CloudDeployGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.CloudDeployGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.CloudDeployGrpcTransport, transports.CloudDeployGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CloudDeployClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.CloudDeployGrpcTransport,)
def test_cloud_deploy_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.CloudDeployTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_cloud_deploy_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.deploy_v1.services.cloud_deploy.transports.CloudDeployTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.CloudDeployTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_delivery_pipelines",
"get_delivery_pipeline",
"create_delivery_pipeline",
"update_delivery_pipeline",
"delete_delivery_pipeline",
"list_targets",
"get_target",
"create_target",
"update_target",
"delete_target",
"list_releases",
"get_release",
"create_release",
"approve_rollout",
"list_rollouts",
"get_rollout",
"create_rollout",
"get_config",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_cloud_deploy_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.deploy_v1.services.cloud_deploy.transports.CloudDeployTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudDeployTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_cloud_deploy_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.deploy_v1.services.cloud_deploy.transports.CloudDeployTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudDeployTransport()
adc.assert_called_once()
def test_cloud_deploy_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CloudDeployClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.CloudDeployGrpcTransport, transports.CloudDeployGrpcAsyncIOTransport,],
)
def test_cloud_deploy_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.CloudDeployGrpcTransport, grpc_helpers),
(transports.CloudDeployGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_cloud_deploy_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"clouddeploy.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="clouddeploy.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.CloudDeployGrpcTransport, transports.CloudDeployGrpcAsyncIOTransport],
)
def test_cloud_deploy_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_cloud_deploy_host_no_port():
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="clouddeploy.googleapis.com"
),
)
assert client.transport._host == "clouddeploy.googleapis.com:443"
def test_cloud_deploy_host_with_port():
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="clouddeploy.googleapis.com:8000"
),
)
assert client.transport._host == "clouddeploy.googleapis.com:8000"
def test_cloud_deploy_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.CloudDeployGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
def test_cloud_deploy_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.CloudDeployGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.CloudDeployGrpcTransport, transports.CloudDeployGrpcAsyncIOTransport],
)
def test_cloud_deploy_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.CloudDeployGrpcTransport, transports.CloudDeployGrpcAsyncIOTransport],
)
def test_cloud_deploy_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_cloud_deploy_grpc_lro_client():
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_cloud_deploy_grpc_lro_async_client():
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_build_path():
project = "squid"
location = "clam"
build = "whelk"
expected = "projects/{project}/locations/{location}/builds/{build}".format(
project=project, location=location, build=build,
)
actual = CloudDeployClient.build_path(project, location, build)
assert expected == actual
def test_parse_build_path():
expected = {
"project": "octopus",
"location": "oyster",
"build": "nudibranch",
}
path = CloudDeployClient.build_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_build_path(path)
assert expected == actual
def test_cluster_path():
project = "cuttlefish"
location = "mussel"
cluster = "winkle"
expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(
project=project, location=location, cluster=cluster,
)
actual = CloudDeployClient.cluster_path(project, location, cluster)
assert expected == actual
def test_parse_cluster_path():
expected = {
"project": "nautilus",
"location": "scallop",
"cluster": "abalone",
}
path = CloudDeployClient.cluster_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_cluster_path(path)
assert expected == actual
def test_config_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}/config".format(
project=project, location=location,
)
actual = CloudDeployClient.config_path(project, location)
assert expected == actual
def test_parse_config_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = CloudDeployClient.config_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_config_path(path)
assert expected == actual
def test_delivery_pipeline_path():
project = "oyster"
location = "nudibranch"
delivery_pipeline = "cuttlefish"
expected = "projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}".format(
project=project, location=location, delivery_pipeline=delivery_pipeline,
)
actual = CloudDeployClient.delivery_pipeline_path(
project, location, delivery_pipeline
)
assert expected == actual
def test_parse_delivery_pipeline_path():
expected = {
"project": "mussel",
"location": "winkle",
"delivery_pipeline": "nautilus",
}
path = CloudDeployClient.delivery_pipeline_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_delivery_pipeline_path(path)
assert expected == actual
def test_release_path():
project = "scallop"
location = "abalone"
delivery_pipeline = "squid"
release = "clam"
expected = "projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}/releases/{release}".format(
project=project,
location=location,
delivery_pipeline=delivery_pipeline,
release=release,
)
actual = CloudDeployClient.release_path(
project, location, delivery_pipeline, release
)
assert expected == actual
def test_parse_release_path():
expected = {
"project": "whelk",
"location": "octopus",
"delivery_pipeline": "oyster",
"release": "nudibranch",
}
path = CloudDeployClient.release_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_release_path(path)
assert expected == actual
def test_rollout_path():
project = "cuttlefish"
location = "mussel"
delivery_pipeline = "winkle"
release = "nautilus"
rollout = "scallop"
expected = "projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}/releases/{release}/rollouts/{rollout}".format(
project=project,
location=location,
delivery_pipeline=delivery_pipeline,
release=release,
rollout=rollout,
)
actual = CloudDeployClient.rollout_path(
project, location, delivery_pipeline, release, rollout
)
assert expected == actual
def test_parse_rollout_path():
expected = {
"project": "abalone",
"location": "squid",
"delivery_pipeline": "clam",
"release": "whelk",
"rollout": "octopus",
}
path = CloudDeployClient.rollout_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_rollout_path(path)
assert expected == actual
def test_target_path():
project = "oyster"
location = "nudibranch"
target = "cuttlefish"
expected = "projects/{project}/locations/{location}/targets/{target}".format(
project=project, location=location, target=target,
)
actual = CloudDeployClient.target_path(project, location, target)
assert expected == actual
def test_parse_target_path():
expected = {
"project": "mussel",
"location": "winkle",
"target": "nautilus",
}
path = CloudDeployClient.target_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_target_path(path)
assert expected == actual
def test_worker_pool_path():
project = "scallop"
location = "abalone"
worker_pool = "squid"
expected = "projects/{project}/locations/{location}/workerPools/{worker_pool}".format(
project=project, location=location, worker_pool=worker_pool,
)
actual = CloudDeployClient.worker_pool_path(project, location, worker_pool)
assert expected == actual
def test_parse_worker_pool_path():
expected = {
"project": "clam",
"location": "whelk",
"worker_pool": "octopus",
}
path = CloudDeployClient.worker_pool_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_worker_pool_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = CloudDeployClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = CloudDeployClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = CloudDeployClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = CloudDeployClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = CloudDeployClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = CloudDeployClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = CloudDeployClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = CloudDeployClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = CloudDeployClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = CloudDeployClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CloudDeployClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.CloudDeployTransport, "_prep_wrapped_messages"
) as prep:
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.CloudDeployTransport, "_prep_wrapped_messages"
) as prep:
transport_class = CloudDeployClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = CloudDeployAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = CloudDeployClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(CloudDeployClient, transports.CloudDeployGrpcTransport),
(CloudDeployAsyncClient, transports.CloudDeployGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/root.go
|
/*
Copyright © 2021 German Lashevich <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cmd
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/cloudflare/cloudflare-go"
"github.com/spf13/viper"
)
var cfgFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "cloudflare-dynamic-dns",
Short: "Updates AAAA records at Cloudflare according to the current IPv6 address",
Long: `Updates AAAA records at Cloudflare according to the current IPv6 address.
Requires a network interface name for an IPv6 address lookup, a domain name
and a Cloudflare API token with edit access rights to the corresponding DNS zone.`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
level, err := log.ParseLevel(viper.GetString("log-level"))
if err != nil {
return err
}
log.Info("Setting log level to:", level)
log.SetLevel(level)
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if viper.ConfigFileUsed() != "" {
log.WithField("config", viper.ConfigFileUsed()).Debug("Using config file")
checkConfigAccessMode(viper.ConfigFileUsed())
} else {
log.Debug("No config file used")
}
var (
domain = viper.GetString("domain")
iface = viper.GetString("iface")
systemd = viper.GetBool("systemd")
token = viper.GetString("token")
ttl = viper.GetInt("ttl")
stateFilepath = ""
)
if ttl < 60 || ttl > 86400 {
// NOTE: 1 is a special value which means "use the default TTL"
if ttl != 1 {
log.WithFields(log.Fields{"ttl": ttl}).Warn("TTL must be between 60 and 86400; using Cloudflare's default")
ttl = 1
}
}
if systemd {
stateFilepath = filepath.Join(os.Getenv("STATE_DIRECTORY"), domain)
}
log.WithFields(log.Fields{
"domain": domain,
"iface": iface,
"stateFilepath": stateFilepath,
"systemd": systemd,
"token": fmt.Sprintf("[%d characters]", len(token)),
"ttl": ttl,
}).Info("Configuration")
addr := getIpv6Address(iface)
if systemd && addr == getOldIpv6Address(stateFilepath) {
log.Info("The address hasn't changed, nothing to do")
log.Info(fmt.Sprintf("To bypass this check run without --systemd flag or remove the state file: %s", stateFilepath))
return
}
api, err := cloudflare.NewWithAPIToken(token)
if err != nil {
log.WithError(err).Fatal("Couldn't create API client")
}
ctx := context.Background()
zoneID, err := api.ZoneIDByName(getZoneFromDomain(domain))
if err != nil {
log.WithError(err).Fatal("Couldn't get ZoneID")
}
dnsRecordFilter := cloudflare.DNSRecord{Type: "AAAA", Name: domain}
existingDNSRecords, err := api.DNSRecords(ctx, zoneID, dnsRecordFilter)
if err != nil {
log.WithError(err).WithField("filter", dnsRecordFilter).Fatal("Couldn't get DNS records")
}
log.WithField("records", existingDNSRecords).Debug("Found DNS records")
desiredDNSRecord := cloudflare.DNSRecord{Type: "AAAA", Name: domain, Content: addr, TTL: ttl}
if len(existingDNSRecords) == 0 {
createNewDNSRecord(api, zoneID, desiredDNSRecord)
} else if len(existingDNSRecords) == 1 {
updateDNSRecord(api, zoneID, existingDNSRecords[0], desiredDNSRecord)
} else {
updated := false
for oldRecord := range existingDNSRecords {
if !updated && existingDNSRecords[oldRecord].Content == desiredDNSRecord.Content {
updateDNSRecord(api, zoneID, existingDNSRecords[oldRecord], desiredDNSRecord)
updated = true
} else {
deleteDNSRecord(api, zoneID, existingDNSRecords[oldRecord])
}
}
if !updated {
createNewDNSRecord(api, zoneID, desiredDNSRecord)
}
}
if systemd {
setOldIpv6Address(stateFilepath, addr)
}
},
}
func createNewDNSRecord(api *cloudflare.API, zoneID string, desiredDNSRecord cloudflare.DNSRecord) {
ctx := context.Background()
log.WithField("record", desiredDNSRecord).Info("Create new DNS record")
_, err := api.CreateDNSRecord(ctx, zoneID, desiredDNSRecord)
if err != nil {
log.WithError(err).Fatal("Couldn't create DNS record")
}
}
func updateDNSRecord(api *cloudflare.API, zoneID string, oldRecord cloudflare.DNSRecord, newRecord cloudflare.DNSRecord) {
ctx := context.Background()
if oldRecord.Content == newRecord.Content && oldRecord.TTL ==
newRecord.TTL {
log.WithField("record", oldRecord).Info("DNS record is up to date")
return
}
log.WithFields(log.Fields{
"new": newRecord,
"old": oldRecord,
}).Info("Updating existing DNS record")
err := api.UpdateDNSRecord(ctx, zoneID, oldRecord.ID, newRecord)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"new": newRecord,
"old": oldRecord,
}).Fatal("Couldn't update DNS record")
}
}
func deleteDNSRecord(api *cloudflare.API, zoneID string, record cloudflare.DNSRecord) {
ctx := context.Background()
log.WithField("record", record).Info("Deleting DNS record")
err := api.DeleteDNSRecord(ctx, zoneID, record.ID)
if err != nil {
log.WithError(err).WithField("record", record).Fatal("Couldn't delete DNS record")
}
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
cobra.CheckErr(rootCmd.Execute())
}
func init() {
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cloudflare-dynamic-dns.yaml)")
rootCmd.Flags().Bool("systemd", false, `Switch operation mode for running in systemd
In this mode the previously used IPv6 address is preserved between runs to avoid unnecessary calls to the Cloudflare API`)
rootCmd.Flags().Int("ttl", 1, "Time to live, in seconds, of the DNS record. Must be between 60 and 86400, or 1 for 'automatic'")
rootCmd.Flags().String("domain", "", "Domain name to assign the IPv6 address to")
rootCmd.Flags().String("iface", "", "Network interface to look up for a IPv6 address")
rootCmd.Flags().String("log-level", "info", "Sets logging level: trace, debug, info, warning, error, fatal, panic")
rootCmd.Flags().String("token", "", "Cloudflare API token with DNS edit access rights")
viper.BindPFlags(rootCmd.Flags())
}
// initConfig reads in config file and ENV variables if set.
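// An illustrative config file (~/.cloudflare-dynamic-dns.yaml); keys mirror the
// command-line flags, values are made up:
//   domain: dyndns.example.com
//   iface: eth0
//   token: <cloudflare-api-token-with-dns-edit-access>
//   ttl: 300
//   log-level: info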
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := os.UserHomeDir()
cobra.CheckErr(err)
// Search config in home directory with name ".cloudflare-dynamic-dns" (without extension).
viper.AddConfigPath(home)
viper.SetConfigType("yaml")
viper.SetConfigName(".cloudflare-dynamic-dns")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
log.Info("Using config file:", viper.ConfigFileUsed())
}
}
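// getIpv6Address returns the first global unicast IPv6 address assigned to the
// given network interface, terminating the program if none is found.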
func getIpv6Address(iface string) string {
netIface, err := net.InterfaceByName(iface)
if err != nil {
log.WithError(err).WithField("iface", iface).Fatal("Can't get the interface")
}
log.WithField("interface", netIface).Debug("Found the interface")
addresses, err := netIface.Addrs()
if err != nil {
log.WithError(err).Fatal("Couldn't get interface addresses")
}
publicIpv6Addresses := []string{}
for _, addr := range addresses {
log.WithField("address", addr).Debug("Found address")
if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() && ipnet.IP.To4() == nil {
publicIpv6Addresses = append(publicIpv6Addresses, ipnet.IP.String())
}
}
if len(publicIpv6Addresses) == 0 {
log.Fatal("No public IPv6 addresses found")
}
log.WithField("addresses", publicIpv6Addresses).Infof("Found %d public IPv6 addresses, use the first one", len(publicIpv6Addresses))
return publicIpv6Addresses[0]
}
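// getZoneFromDomain derives the Cloudflare zone name from the record's domain by
// keeping only the last two labels (e.g. "dyndns.example.com" -> "example.com").
// Note that this assumes a two-label zone; zones such as "example.co.uk" would
// not be derived correctly.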
func getZoneFromDomain(domain string) string {
parts := strings.Split(domain, ".")
return strings.Join(parts[len(parts)-2:], ".")
}
func getOldIpv6Address(stateFilepath string) string {
ipv6, err := os.ReadFile(stateFilepath)
if err != nil {
log.WithError(err).Warn("Can't get old ipv6 address")
return "INVALID"
}
return string(ipv6)
}
func setOldIpv6Address(stateFilepath string, ipv6 string) {
err := os.WriteFile(stateFilepath, []byte(ipv6), 0644)
if err != nil {
log.WithError(err).Error("Can't write state file")
}
}
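// checkConfigAccessMode warns when the config file permissions allow access by
// group or others; the token inside should be readable only by the owner.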
func checkConfigAccessMode(configFilename string) {
info, err := os.Stat(configFilename)
if err != nil {
log.WithError(err).Fatal("Can't get config file info")
}
log.WithField("mode", info.Mode()).Debug("Config file mode")
if info.Mode()&1010 != 0 {
log.Warn("Config file should be accessible only by owner")
}
}
|
[
"\"STATE_DIRECTORY\""
] |
[] |
[
"STATE_DIRECTORY"
] |
[]
|
["STATE_DIRECTORY"]
|
go
| 1 | 0 | |
pop_test.go
|
package pop
import (
stdlog "log"
"os"
"testing"
"time"
"github.com/gobuffalo/nulls"
"github.com/gobuffalo/validate/v3"
"github.com/gobuffalo/validate/v3/validators"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/suite"
"github.com/gobuffalo/pop/v5/logging"
)
var PDB *Connection
type PostgreSQLSuite struct {
suite.Suite
}
type MySQLSuite struct {
suite.Suite
}
type SQLiteSuite struct {
suite.Suite
}
type CockroachSuite struct {
suite.Suite
}
func TestSpecificSuites(t *testing.T) {
switch os.Getenv("SODA_DIALECT") {
case "postgres":
suite.Run(t, &PostgreSQLSuite{})
case "mysql", "mysql_travis":
suite.Run(t, &MySQLSuite{})
case "sqlite":
suite.Run(t, &SQLiteSuite{})
case "cockroach":
suite.Run(t, &CockroachSuite{})
}
}
func init() {
Debug = false
AddLookupPaths("./")
dialect := os.Getenv("SODA_DIALECT")
if dialect != "" {
if err := LoadConfigFile(); err != nil {
stdlog.Panic(err)
}
var err error
PDB, err = Connect(dialect)
log(logging.Info, "Run test with dialect %v", dialect)
if err != nil {
stdlog.Panic(err)
}
} else {
log(logging.Info, "Skipping integration tests")
}
}
func transaction(fn func(tx *Connection)) {
err := PDB.Rollback(func(tx *Connection) {
fn(tx)
})
if err != nil {
stdlog.Fatal(err)
}
}
func ts(s string) string {
return PDB.Dialect.TranslateSQL(s)
}
type Client struct {
ClientID string `db:"id"`
}
func (c Client) TableName() string {
return "clients"
}
type User struct {
ID int `db:"id"`
UserName string `db:"user_name"`
Email string `db:"email"`
Name nulls.String `db:"name"`
Alive nulls.Bool `db:"alive"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
BirthDate nulls.Time `db:"birth_date"`
Bio nulls.String `db:"bio"`
Price nulls.Float64 `db:"price"`
FullName nulls.String `db:"full_name" select:"name as full_name"`
Books Books `has_many:"books" order_by:"title asc"`
FavoriteSong Song `has_one:"song" fk_id:"u_id"`
Houses Addresses `many_to_many:"users_addresses"`
}
// Validate gets run every time you call a "Validate*" (ValidateAndSave, ValidateAndCreate, ValidateAndUpdate) method.
// This method is not required and may be deleted.
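// An illustrative call (inside a transaction): verrs, err := tx.ValidateAndCreate(&User{Name: nulls.NewString("Mark")})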
func (u *User) Validate(tx *Connection) (*validate.Errors, error) {
return validate.Validate(
&validators.StringIsPresent{Field: u.Name.String, Name: "Name"},
), nil
}
type Users []User
type UserAttribute struct {
ID int `db:"id"`
UserName string `db:"user_name"`
NickName string `db:"nick_name"`
User User `json:"user" belongs_to:"user" fk_id:"UserName" primary_id:"UserName"`
}
type Book struct {
ID int `db:"id"`
Title string `db:"title"`
Isbn string `db:"isbn"`
UserID nulls.Int `db:"user_id"`
User User `belongs_to:"user"`
Description string `db:"description"`
Writers Writers `has_many:"writers"`
TaxiID nulls.Int `db:"taxi_id"`
Taxi Taxi `belongs_to:"taxi"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Taxi struct {
ID int `db:"id"`
Model string `db:"model"`
UserID nulls.Int `db:"user_id"`
AddressID nulls.Int `db:"address_id"`
Driver *User `belongs_to:"user" fk_id:"user_id"`
Address Address `belongs_to:"address"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
// Validate gets run every time you call a "Validate*" (ValidateAndSave, ValidateAndCreate, ValidateAndUpdate) method.
// This method is not required and may be deleted.
func (b *Book) Validate(tx *Connection) (*validate.Errors, error) {
return validate.Validate(
&validators.StringIsPresent{Field: b.Description, Name: "Description"},
), nil
}
type Books []Book
type Writer struct {
ID int `db:"id"`
Name string `db:"name"`
BookID int `db:"book_id"`
Book Book `belongs_to:"book"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Writers []Writer
type Address struct {
ID int `db:"id"`
Street string `db:"street"`
HouseNumber int `db:"house_number"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Addresses []Address
type UsersAddress struct {
ID int `db:"id"`
UserID int `db:"user_id"`
AddressID int `db:"address_id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type UsersAddressQuery struct {
ID int `db:"id"`
UserID int `db:"user_id"`
AddressID int `db:"address_id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
UserName *string `db:"name" json:"user_name"`
UserEmail *string `db:"email" json:"user_email"`
}
func (UsersAddressQuery) TableName() string {
return "users_addresses"
}
type Friend struct {
ID int `db:"id"`
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
func (Friend) TableName() string {
return "good_friends"
}
type Family struct {
ID int `db:"id"`
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
func (Family) TableName() string {
// schema.table_name
return "family.members"
}
type Enemy struct {
A string
}
type Song struct {
ID uuid.UUID `db:"id"`
Title string `db:"title"`
UserID int `db:"u_id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
ComposedByID int `json:"composed_by_id" db:"composed_by_id"`
ComposedBy Composer `belongs_to:"composer"`
}
type Composer struct {
ID int `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Course struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CourseCode struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
CourseID uuid.UUID `json:"course_id" db:"course_id"`
Course Course `json:"-" belongs_to:"course"`
// Course Course `belongs_to:"course"`
}
type ValidatableCar struct {
ID int64 `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
var validationLogs []string
func (v *ValidatableCar) Validate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "Validate")
verrs := validate.Validate(&validators.StringIsPresent{Field: v.Name, Name: "Name"})
return verrs, nil
}
func (v *ValidatableCar) ValidateSave(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateSave")
return nil, nil
}
func (v *ValidatableCar) ValidateUpdate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateUpdate")
return nil, nil
}
func (v *ValidatableCar) ValidateCreate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateCreate")
return nil, nil
}
type NotValidatableCar struct {
ID int `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CallbacksUser struct {
ID int `db:"id"`
BeforeS string `db:"before_s"`
BeforeC string `db:"before_c"`
BeforeU string `db:"before_u"`
BeforeD string `db:"before_d"`
BeforeV string `db:"before_v"`
AfterS string `db:"after_s"`
AfterC string `db:"after_c"`
AfterU string `db:"after_u"`
AfterD string `db:"after_d"`
AfterF string `db:"after_f"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CallbacksUsers []CallbacksUser
func (u *CallbacksUser) BeforeSave(tx *Connection) error {
u.BeforeS = "BeforeSave"
return nil
}
func (u *CallbacksUser) BeforeUpdate(tx *Connection) error {
u.BeforeU = "BeforeUpdate"
return nil
}
func (u *CallbacksUser) BeforeCreate(tx *Connection) error {
u.BeforeC = "BeforeCreate"
return nil
}
func (u *CallbacksUser) BeforeDestroy(tx *Connection) error {
u.BeforeD = "BeforeDestroy"
return nil
}
func (u *CallbacksUser) BeforeValidate(tx *Connection) error {
u.BeforeV = "BeforeValidate"
return nil
}
func (u *CallbacksUser) AfterSave(tx *Connection) error {
u.AfterS = "AfterSave"
return nil
}
func (u *CallbacksUser) AfterUpdate(tx *Connection) error {
u.AfterU = "AfterUpdate"
return nil
}
func (u *CallbacksUser) AfterCreate(tx *Connection) error {
u.AfterC = "AfterCreate"
return nil
}
func (u *CallbacksUser) AfterDestroy(tx *Connection) error {
u.AfterD = "AfterDestroy"
return nil
}
func (u *CallbacksUser) AfterFind(tx *Connection) error {
u.AfterF = "AfterFind"
return nil
}
type Label struct {
ID string `db:"id"`
}
type SingleID struct {
ID int `db:"id"`
}
type Body struct {
ID int `json:"id" db:"id"`
Head *Head `json:"head" has_one:"head"`
}
type Head struct {
ID int `json:"id,omitempty" db:"id"`
BodyID int `json:"-" db:"body_id"`
Body *Body `json:"body,omitempty" belongs_to:"body"`
}
type HeadPtr struct {
ID int `json:"id,omitempty" db:"id"`
BodyID *int `json:"-" db:"body_id"`
Body *Body `json:"body,omitempty" belongs_to:"body"`
}
type Student struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
// https://github.com/gobuffalo/pop/issues/302
type Parent struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
Students []*Student `many_to_many:"parents_students"`
}
type CrookedColour struct {
ID int `db:"pk"`
Name string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type CrookedSong struct {
ID string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
|
[
"\"SODA_DIALECT\"",
"\"SODA_DIALECT\""
] |
[] |
[
"SODA_DIALECT"
] |
[]
|
["SODA_DIALECT"]
|
go
| 1 | 0 | |
src/main/java/java/lang/ProcessBuilder.java
|
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
package java.lang;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.AccessControlException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* This class is used to create operating system processes.
*
* <p>Each {@code ProcessBuilder} instance manages a collection
* of process attributes. The {@link #start()} method creates a new
* {@link Process} instance with those attributes. The {@link
* #start()} method can be invoked repeatedly from the same instance
* to create new subprocesses with identical or related attributes.
*
* <p>Each process builder manages these process attributes:
*
* <ul>
*
* <li>a <i>command</i>, a list of strings which signifies the
* external program file to be invoked and its arguments, if any.
* Which string lists represent a valid operating system command is
* system-dependent. For example, it is common for each conceptual
* argument to be an element in this list, but there are operating
* systems where programs are expected to tokenize command line
* strings themselves - on such a system a Java implementation might
* require commands to contain exactly two elements.
*
* <li>an <i>environment</i>, which is a system-dependent mapping from
* <i>variables</i> to <i>values</i>. The initial value is a copy of
* the environment of the current process (see {@link System#getenv()}).
*
* <li>a <i>working directory</i>. The default value is the current
* working directory of the current process, usually the directory
* named by the system property {@code user.dir}.
*
* <li><a name="redirect-input">a source of <i>standard input</i>.
* By default, the subprocess reads input from a pipe. Java code
* can access this pipe via the output stream returned by
* {@link Process#getOutputStream()}. However, standard input may
* be redirected to another source using
* {@link #redirectInput(Redirect) redirectInput}.
* In this case, {@link Process#getOutputStream()} will return a
* <i>null output stream</i>, for which:
*
* <ul>
* <li>the {@link OutputStream#write(int) write} methods always
* throw {@code IOException}
* <li>the {@link OutputStream#close() close} method does nothing
* </ul>
*
* <li><a name="redirect-output">a destination for <i>standard output</i>
* and <i>standard error</i>. By default, the subprocess writes standard
* output and standard error to pipes. Java code can access these pipes
* via the input streams returned by {@link Process#getInputStream()} and
* {@link Process#getErrorStream()}. However, standard output and
* standard error may be redirected to other destinations using
* {@link #redirectOutput(Redirect) redirectOutput} and
* {@link #redirectError(Redirect) redirectError}.
* In this case, {@link Process#getInputStream()} and/or
* {@link Process#getErrorStream()} will return a <i>null input
* stream</i>, for which:
*
* <ul>
* <li>the {@link InputStream#read() read} methods always return
* {@code -1}
* <li>the {@link InputStream#available() available} method always returns
* {@code 0}
* <li>the {@link InputStream#close() close} method does nothing
* </ul>
*
* <li>a <i>redirectErrorStream</i> property. Initially, this property
* is {@code false}, meaning that the standard output and error
* output of a subprocess are sent to two separate streams, which can
* be accessed using the {@link Process#getInputStream()} and {@link
* Process#getErrorStream()} methods.
*
* <p>If the value is set to {@code true}, then:
*
* <ul>
* <li>standard error is merged with the standard output and always sent
* to the same destination (this makes it easier to correlate error
* messages with the corresponding output)
* <li>the common destination of standard error and standard output can be
* redirected using
* {@link #redirectOutput(Redirect) redirectOutput}
* <li>any redirection set by the
* {@link #redirectError(Redirect) redirectError}
* method is ignored when creating a subprocess
* <li>the stream returned from {@link Process#getErrorStream()} will
* always be a <a href="#redirect-output">null input stream</a>
* </ul>
*
* </ul>
*
* <p>Modifying a process builder's attributes will affect processes
* subsequently started by that object's {@link #start()} method, but
* will never affect previously started processes or the Java process
* itself.
*
* <p>Most error checking is performed by the {@link #start()} method.
* It is possible to modify the state of an object so that {@link
* #start()} will fail. For example, setting the command attribute to
* an empty list will not throw an exception unless {@link #start()}
* is invoked.
*
* <p><strong>Note that this class is not synchronized.</strong>
* If multiple threads access a {@code ProcessBuilder} instance
* concurrently, and at least one of the threads modifies one of the
* attributes structurally, it <i>must</i> be synchronized externally.
*
* <p>Starting a new process which uses the default working directory
* and environment is easy:
*
* <pre> {@code
* Process p = new ProcessBuilder("myCommand", "myArg").start();
* }</pre>
*
* <p>Here is an example that starts a process with a modified working
* directory and environment, and redirects standard output and error
* to be appended to a log file:
*
* <pre> {@code
* ProcessBuilder pb =
* new ProcessBuilder("myCommand", "myArg1", "myArg2");
* Map<String, String> env = pb.environment();
* env.put("VAR1", "myValue");
* env.remove("OTHERVAR");
* env.put("VAR2", env.get("VAR1") + "suffix");
* pb.directory(new File("myDir"));
* File log = new File("log");
* pb.redirectErrorStream(true);
* pb.redirectOutput(Redirect.appendTo(log));
* Process p = pb.start();
* assert pb.redirectInput() == Redirect.PIPE;
* assert pb.redirectOutput().file() == log;
* assert p.getInputStream().read() == -1;
* }</pre>
*
* <p>To start a process with an explicit set of environment
* variables, first call {@link java.util.Map#clear() Map.clear()}
* before adding environment variables.
*
* @author Martin Buchholz
* @since 1.5
*/
public final class ProcessBuilder
{
private List<String> command;
private File directory;
private Map<String,String> environment;
private boolean redirectErrorStream;
private Redirect[] redirects;
/**
* Constructs a process builder with the specified operating
* system program and arguments. This constructor does <i>not</i>
* make a copy of the {@code command} list. Subsequent
* updates to the list will be reflected in the state of the
* process builder. It is not checked whether
* {@code command} corresponds to a valid operating system
* command.
*
* @param command the list containing the program and its arguments
* @throws NullPointerException if the argument is null
*/
public ProcessBuilder(List<String> command) {
if (command == null)
throw new NullPointerException();
this.command = command;
}
/**
* Constructs a process builder with the specified operating
* system program and arguments. This is a convenience
* constructor that sets the process builder's command to a string
* list containing the same strings as the {@code command}
* array, in the same order. It is not checked whether
* {@code command} corresponds to a valid operating system
* command.
*
* @param command a string array containing the program and its arguments
*/
public ProcessBuilder(String... command) {
this.command = new ArrayList<>(command.length);
for (String arg : command)
this.command.add(arg);
}
/**
* Sets this process builder's operating system program and
* arguments. This method does <i>not</i> make a copy of the
* {@code command} list. Subsequent updates to the list will
* be reflected in the state of the process builder. It is not
* checked whether {@code command} corresponds to a valid
* operating system command.
*
* @param command the list containing the program and its arguments
* @return this process builder
*
* @throws NullPointerException if the argument is null
*/
public ProcessBuilder command(List<String> command) {
if (command == null)
throw new NullPointerException();
this.command = command;
return this;
}
/**
* Sets this process builder's operating system program and
* arguments. This is a convenience method that sets the command
* to a string list containing the same strings as the
* {@code command} array, in the same order. It is not
* checked whether {@code command} corresponds to a valid
* operating system command.
*
* @param command a string array containing the program and its arguments
* @return this process builder
*/
public ProcessBuilder command(String... command) {
this.command = new ArrayList<>(command.length);
for (String arg : command)
this.command.add(arg);
return this;
}
/**
* Returns this process builder's operating system program and
* arguments. The returned list is <i>not</i> a copy. Subsequent
* updates to the list will be reflected in the state of this
* process builder.
*
* @return this process builder's program and its arguments
*/
public List<String> command() {
return command;
}
/**
* Returns a string map view of this process builder's environment.
*
* Whenever a process builder is created, the environment is
* initialized to a copy of the current process environment (see
* {@link System#getenv()}). Subprocesses subsequently started by
* this object's {@link #start()} method will use this map as
* their environment.
*
* <p>The returned object may be modified using ordinary {@link
* java.util.Map Map} operations. These modifications will be
* visible to subprocesses started via the {@link #start()}
* method. Two {@code ProcessBuilder} instances always
* contain independent process environments, so changes to the
* returned map will never be reflected in any other
* {@code ProcessBuilder} instance or the values returned by
* {@link System#getenv System.getenv}.
*
* <p>If the system does not support environment variables, an
* empty map is returned.
*
* <p>The returned map does not permit null keys or values.
* Attempting to insert or query the presence of a null key or
* value will throw a {@link NullPointerException}.
* Attempting to query the presence of a key or value which is not
* of type {@link String} will throw a {@link ClassCastException}.
*
* <p>The behavior of the returned map is system-dependent. A
* system may not allow modifications to environment variables or
* may forbid certain variable names or values. For this reason,
* attempts to modify the map may fail with
* {@link UnsupportedOperationException} or
* {@link IllegalArgumentException}
* if the modification is not permitted by the operating system.
*
* <p>Since the external format of environment variable names and
* values is system-dependent, there may not be a one-to-one
* mapping between them and Java's Unicode strings. Nevertheless,
* the map is implemented in such a way that environment variables
* which are not modified by Java code will have an unmodified
* native representation in the subprocess.
*
* <p>The returned map and its collection views may not obey the
* general contract of the {@link Object#equals} and
* {@link Object#hashCode} methods.
*
* <p>The returned map is typically case-sensitive on all platforms.
*
* <p>If a security manager exists, its
* {@link SecurityManager#checkPermission checkPermission} method
* is called with a
* {@link RuntimePermission}{@code ("getenv.*")} permission.
* This may result in a {@link SecurityException} being thrown.
*
* <p>When passing information to a Java subprocess,
* <a href=System.html#EnvironmentVSSystemProperties>system properties</a>
* are generally preferred over environment variables.
*
* @return this process builder's environment
*
* @throws SecurityException
* if a security manager exists and its
* {@link SecurityManager#checkPermission checkPermission}
* method doesn't allow access to the process environment
*
* @see Runtime#exec(String[],String[],java.io.File)
* @see System#getenv()
*/
public Map<String,String> environment() {
SecurityManager security = System.getSecurityManager();
if (security != null)
security.checkPermission(new RuntimePermission("getenv.*"));
if (environment == null)
environment = ProcessEnvironment.environment();
assert environment != null;
return environment;
}
// Only for use by Runtime.exec(...envp...)
ProcessBuilder environment(String[] envp) {
assert environment == null;
if (envp != null) {
environment = ProcessEnvironment.emptyEnvironment(envp.length);
assert environment != null;
for (String envstring : envp) {
// Before 1.5, we blindly passed invalid envstrings
// to the child process.
// We would like to throw an exception, but do not,
// for compatibility with old broken code.
// Silently discard any trailing junk.
if (envstring.indexOf((int) '\u0000') != -1)
envstring = envstring.replaceFirst("\u0000.*", "");
int eqlsign =
envstring.indexOf('=', ProcessEnvironment.MIN_NAME_LENGTH);
// Silently ignore envstrings lacking the required `='.
if (eqlsign != -1)
environment.put(envstring.substring(0,eqlsign),
envstring.substring(eqlsign+1));
}
}
return this;
}
/**
* Returns this process builder's working directory.
*
* Subprocesses subsequently started by this object's {@link
* #start()} method will use this as their working directory.
* The returned value may be {@code null} -- this means to use
* the working directory of the current Java process, usually the
* directory named by the system property {@code user.dir},
* as the working directory of the child process.
*
* @return this process builder's working directory
*/
public File directory() {
return directory;
}
/**
* Sets this process builder's working directory.
*
* Subprocesses subsequently started by this object's {@link
* #start()} method will use this as their working directory.
* The argument may be {@code null} -- this means to use the
* working directory of the current Java process, usually the
* directory named by the system property {@code user.dir},
* as the working directory of the child process.
*
* @param directory the new working directory
* @return this process builder
*/
public ProcessBuilder directory(File directory) {
this.directory = directory;
return this;
}
// ---------------- I/O Redirection ----------------
/**
* Implements a <a href="#redirect-output">null input stream</a>.
*/
static class NullInputStream extends InputStream {
static final NullInputStream INSTANCE = new NullInputStream();
private NullInputStream() {}
public int read() { return -1; }
public int available() { return 0; }
}
/**
* Implements a <a href="#redirect-input">null output stream</a>.
*/
static class NullOutputStream extends OutputStream {
static final NullOutputStream INSTANCE = new NullOutputStream();
private NullOutputStream() {}
public void write(int b) throws IOException {
throw new IOException("Stream closed");
}
}
/**
* Represents a source of subprocess input or a destination of
* subprocess output.
*
* Each {@code Redirect} instance is one of the following:
*
* <ul>
* <li>the special value {@link #PIPE Redirect.PIPE}
* <li>the special value {@link #INHERIT Redirect.INHERIT}
* <li>a redirection to read from a file, created by an invocation of
* {@link Redirect#from Redirect.from(File)}
* <li>a redirection to write to a file, created by an invocation of
* {@link Redirect#to Redirect.to(File)}
* <li>a redirection to append to a file, created by an invocation of
* {@link Redirect#appendTo Redirect.appendTo(File)}
* </ul>
*
* <p>Each of the above categories has an associated unique
* {@link Type Type}.
*
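 * <p>A minimal illustrative sketch combining these redirects (the command
 * and file names are placeholders):
 *
 * <pre> {@code
 * ProcessBuilder pb = new ProcessBuilder("myCommand");
 * pb.redirectInput(Redirect.from(new File("input.txt")));
 * pb.redirectOutput(Redirect.appendTo(new File("out.log")));
 * pb.redirectError(Redirect.INHERIT);
 * }</pre>
 *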
* @since 1.7
*/
public static abstract class Redirect {
/**
* The type of a {@link Redirect}.
*/
public enum Type {
/**
* The type of {@link Redirect#PIPE Redirect.PIPE}.
*/
PIPE,
/**
* The type of {@link Redirect#INHERIT Redirect.INHERIT}.
*/
INHERIT,
/**
* The type of redirects returned from
* {@link Redirect#from Redirect.from(File)}.
*/
READ,
/**
* The type of redirects returned from
* {@link Redirect#to Redirect.to(File)}.
*/
WRITE,
/**
* The type of redirects returned from
* {@link Redirect#appendTo Redirect.appendTo(File)}.
*/
APPEND
};
/**
* Returns the type of this {@code Redirect}.
* @return the type of this {@code Redirect}
*/
public abstract Type type();
/**
* Indicates that subprocess I/O will be connected to the
* current Java process over a pipe.
*
* This is the default handling of subprocess standard I/O.
*
* <p>It will always be true that
* <pre> {@code
* Redirect.PIPE.file() == null &&
* Redirect.PIPE.type() == Redirect.Type.PIPE
* }</pre>
*/
public static final Redirect PIPE = new Redirect() {
public Type type() { return Type.PIPE; }
public String toString() { return type().toString(); }};
/**
* Indicates that subprocess I/O source or destination will be the
* same as those of the current process. This is the normal
* behavior of most operating system command interpreters (shells).
*
* <p>It will always be true that
* <pre> {@code
* Redirect.INHERIT.file() == null &&
* Redirect.INHERIT.type() == Redirect.Type.INHERIT
* }</pre>
*/
public static final Redirect INHERIT = new Redirect() {
public Type type() { return Type.INHERIT; }
public String toString() { return type().toString(); }};
/**
* Returns the {@link File} source or destination associated
* with this redirect, or {@code null} if there is no such file.
*
* @return the file associated with this redirect,
* or {@code null} if there is no such file
*/
public File file() { return null; }
/**
* When redirected to a destination file, indicates if the output
* is to be written to the end of the file.
*/
boolean append() {
throw new UnsupportedOperationException();
}
/**
* Returns a redirect to read from the specified file.
*
* <p>It will always be true that
* <pre> {@code
* Redirect.from(file).file() == file &&
* Redirect.from(file).type() == Redirect.Type.READ
* }</pre>
*
* @throws NullPointerException if the specified file is null
* @return a redirect to read from the specified file
*/
public static Redirect from(final File file) {
if (file == null)
throw new NullPointerException();
return new Redirect() {
public Type type() { return Type.READ; }
public File file() { return file; }
public String toString() {
return "redirect to read from file \"" + file + "\"";
}
};
}
/**
* Returns a redirect to write to the specified file.
* If the specified file exists when the subprocess is started,
* its previous contents will be discarded.
*
* <p>It will always be true that
* <pre> {@code
* Redirect.to(file).file() == file &&
* Redirect.to(file).type() == Redirect.Type.WRITE
* }</pre>
*
* @throws NullPointerException if the specified file is null
* @return a redirect to write to the specified file
*/
public static Redirect to(final File file) {
if (file == null)
throw new NullPointerException();
return new Redirect() {
public Type type() { return Type.WRITE; }
public File file() { return file; }
public String toString() {
return "redirect to write to file \"" + file + "\"";
}
boolean append() { return false; }
};
}
/**
* Returns a redirect to append to the specified file.
* Each write operation first advances the position to the
* end of the file and then writes the requested data.
* Whether the advancement of the position and the writing
* of the data are done in a single atomic operation is
* system-dependent and therefore unspecified.
*
* <p>It will always be true that
* <pre> {@code
* Redirect.appendTo(file).file() == file &&
* Redirect.appendTo(file).type() == Redirect.Type.APPEND
* }</pre>
*
* @throws NullPointerException if the specified file is null
* @return a redirect to append to the specified file
*/
public static Redirect appendTo(final File file) {
if (file == null)
throw new NullPointerException();
return new Redirect() {
public Type type() { return Type.APPEND; }
public File file() { return file; }
public String toString() {
return "redirect to append to file \"" + file + "\"";
}
boolean append() { return true; }
};
}
/**
* Compares the specified object with this {@code Redirect} for
* equality. Returns {@code true} if and only if the two
* objects are identical or both objects are {@code Redirect}
* instances of the same type associated with non-null equal
* {@code File} instances.
*/
public boolean equals(Object obj) {
if (obj == this)
return true;
if (! (obj instanceof Redirect))
return false;
Redirect r = (Redirect) obj;
if (r.type() != this.type())
return false;
assert this.file() != null;
return this.file().equals(r.file());
}
/**
* Returns a hash code value for this {@code Redirect}.
* @return a hash code value for this {@code Redirect}
*/
public int hashCode() {
File file = file();
if (file == null)
return super.hashCode();
else
return file.hashCode();
}
/**
* No public constructors. Clients must use predefined
* static {@code Redirect} instances or factory methods.
*/
private Redirect() {}
}
private Redirect[] redirects() {
if (redirects == null)
redirects = new Redirect[] {
Redirect.PIPE, Redirect.PIPE, Redirect.PIPE
};
return redirects;
}
/**
* Sets this process builder's standard input source.
*
* Subprocesses subsequently started by this object's {@link #start()}
* method obtain their standard input from this source.
*
* <p>If the source is {@link Redirect#PIPE Redirect.PIPE}
* (the initial value), then the standard input of a
* subprocess can be written to using the output stream
* returned by {@link Process#getOutputStream()}.
* If the source is set to any other value, then
* {@link Process#getOutputStream()} will return a
* <a href="#redirect-input">null output stream</a>.
*
* @param source the new standard input source
* @return this process builder
* @throws IllegalArgumentException
* if the redirect does not correspond to a valid source
* of data, that is, has type
* {@link Redirect.Type#WRITE WRITE} or
* {@link Redirect.Type#APPEND APPEND}
* @since 1.7
*/
public ProcessBuilder redirectInput(Redirect source) {
if (source.type() == Redirect.Type.WRITE ||
source.type() == Redirect.Type.APPEND)
throw new IllegalArgumentException(
"Redirect invalid for reading: " + source);
redirects()[0] = source;
return this;
}
/**
* Sets this process builder's standard output destination.
*
* Subprocesses subsequently started by this object's {@link #start()}
* method send their standard output to this destination.
*
* <p>If the destination is {@link Redirect#PIPE Redirect.PIPE}
* (the initial value), then the standard output of a subprocess
* can be read using the input stream returned by {@link
* Process#getInputStream()}.
* If the destination is set to any other value, then
* {@link Process#getInputStream()} will return a
* <a href="#redirect-output">null input stream</a>.
*
* @param destination the new standard output destination
* @return this process builder
* @throws IllegalArgumentException
* if the redirect does not correspond to a valid
* destination of data, that is, has type
* {@link Redirect.Type#READ READ}
* @since 1.7
*/
public ProcessBuilder redirectOutput(Redirect destination) {
if (destination.type() == Redirect.Type.READ)
throw new IllegalArgumentException(
"Redirect invalid for writing: " + destination);
redirects()[1] = destination;
return this;
}
/**
* Sets this process builder's standard error destination.
*
* Subprocesses subsequently started by this object's {@link #start()}
* method send their standard error to this destination.
*
* <p>If the destination is {@link Redirect#PIPE Redirect.PIPE}
* (the initial value), then the error output of a subprocess
* can be read using the input stream returned by {@link
* Process#getErrorStream()}.
* If the destination is set to any other value, then
* {@link Process#getErrorStream()} will return a
* <a href="#redirect-output">null input stream</a>.
*
* <p>If the {@link #redirectErrorStream redirectErrorStream}
* attribute has been set {@code true}, then the redirection set
* by this method has no effect.
*
* @param destination the new standard error destination
* @return this process builder
* @throws IllegalArgumentException
* if the redirect does not correspond to a valid
* destination of data, that is, has type
* {@link Redirect.Type#READ READ}
* @since 1.7
*/
public ProcessBuilder redirectError(Redirect destination) {
if (destination.type() == Redirect.Type.READ)
throw new IllegalArgumentException(
"Redirect invalid for writing: " + destination);
redirects()[2] = destination;
return this;
}
/**
* Sets this process builder's standard input source to a file.
*
* <p>This is a convenience method. An invocation of the form
* {@code redirectInput(file)}
* behaves in exactly the same way as the invocation
* {@link #redirectInput(Redirect) redirectInput}
* {@code (Redirect.from(file))}.
*
* @param file the new standard input source
* @return this process builder
* @since 1.7
*/
public ProcessBuilder redirectInput(File file) {
return redirectInput(Redirect.from(file));
}
/**
* Sets this process builder's standard output destination to a file.
*
* <p>This is a convenience method. An invocation of the form
* {@code redirectOutput(file)}
* behaves in exactly the same way as the invocation
* {@link #redirectOutput(Redirect) redirectOutput}
* {@code (Redirect.to(file))}.
*
* @param file the new standard output destination
* @return this process builder
* @since 1.7
*/
public ProcessBuilder redirectOutput(File file) {
return redirectOutput(Redirect.to(file));
}
/**
* Sets this process builder's standard error destination to a file.
*
* <p>This is a convenience method. An invocation of the form
* {@code redirectError(file)}
* behaves in exactly the same way as the invocation
* {@link #redirectError(Redirect) redirectError}
* {@code (Redirect.to(file))}.
*
* @param file the new standard error destination
* @return this process builder
* @since 1.7
*/
public ProcessBuilder redirectError(File file) {
return redirectError(Redirect.to(file));
}
/**
* Returns this process builder's standard input source.
*
* Subprocesses subsequently started by this object's {@link #start()}
* method obtain their standard input from this source.
* The initial value is {@link Redirect#PIPE Redirect.PIPE}.
*
* @return this process builder's standard input source
* @since 1.7
*/
public Redirect redirectInput() {
return (redirects == null) ? Redirect.PIPE : redirects[0];
}
/**
* Returns this process builder's standard output destination.
*
* Subprocesses subsequently started by this object's {@link #start()}
* method redirect their standard output to this destination.
* The initial value is {@link Redirect#PIPE Redirect.PIPE}.
*
* @return this process builder's standard output destination
* @since 1.7
*/
public Redirect redirectOutput() {
return (redirects == null) ? Redirect.PIPE : redirects[1];
}
/**
* Returns this process builder's standard error destination.
*
* Subprocesses subsequently started by this object's {@link #start()}
* method redirect their standard error to this destination.
* The initial value is {@link Redirect#PIPE Redirect.PIPE}.
*
* @return this process builder's standard error destination
* @since 1.7
*/
public Redirect redirectError() {
return (redirects == null) ? Redirect.PIPE : redirects[2];
}
/**
* Sets the source and destination for subprocess standard I/O
* to be the same as those of the current Java process.
*
* <p>This is a convenience method. An invocation of the form
* <pre> {@code
* pb.inheritIO()
* }</pre>
* behaves in exactly the same way as the invocation
* <pre> {@code
* pb.redirectInput(Redirect.INHERIT)
* .redirectOutput(Redirect.INHERIT)
* .redirectError(Redirect.INHERIT)
* }</pre>
*
* This gives behavior equivalent to most operating system
* command interpreters, or the standard C library function
* {@code system()}.
*
* @return this process builder
* @since 1.7
*/
public ProcessBuilder inheritIO() {
Arrays.fill(redirects(), Redirect.INHERIT);
return this;
}
/**
* Tells whether this process builder merges standard error and
* standard output.
*
* <p>If this property is {@code true}, then any error output
* generated by subprocesses subsequently started by this object's
* {@link #start()} method will be merged with the standard
* output, so that both can be read using the
* {@link Process#getInputStream()} method. This makes it easier
* to correlate error messages with the corresponding output.
* The initial value is {@code false}.
*
* @return this process builder's {@code redirectErrorStream} property
*/
public boolean redirectErrorStream() {
return redirectErrorStream;
}
/**
* Sets this process builder's {@code redirectErrorStream} property.
*
* <p>If this property is {@code true}, then any error output
* generated by subprocesses subsequently started by this object's
* {@link #start()} method will be merged with the standard
* output, so that both can be read using the
* {@link Process#getInputStream()} method. This makes it easier
* to correlate error messages with the corresponding output.
* The initial value is {@code false}.
*
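 * <p>A minimal illustrative sketch ({@code myCommand} is a placeholder):
 *
 * <pre> {@code
 * Process p = new ProcessBuilder("myCommand")
 *     .redirectErrorStream(true)
 *     .start();
 * // p.getInputStream() now carries both standard output and standard error
 * }</pre>
 *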
* @param redirectErrorStream the new property value
* @return this process builder
*/
public ProcessBuilder redirectErrorStream(boolean redirectErrorStream) {
this.redirectErrorStream = redirectErrorStream;
return this;
}
/**
* Starts a new process using the attributes of this process builder.
*
* <p>The new process will
* invoke the command and arguments given by {@link #command()},
* in a working directory as given by {@link #directory()},
* with a process environment as given by {@link #environment()}.
*
* <p>This method checks that the command is a valid operating
* system command. Which commands are valid is system-dependent,
* but at the very least the command must be a non-empty list of
* non-null strings.
*
* <p>A minimal set of system dependent environment variables may
* be required to start a process on some operating systems.
* As a result, the subprocess may inherit additional environment variable
* settings beyond those in the process builder's {@link #environment()}.
*
* <p>If there is a security manager, its
* {@link SecurityManager#checkExec checkExec}
* method is called with the first component of this object's
* {@code command} array as its argument. This may result in
* a {@link SecurityException} being thrown.
*
* <p>Starting an operating system process is highly system-dependent.
* Among the many things that can go wrong are:
* <ul>
* <li>The operating system program file was not found.
* <li>Access to the program file was denied.
* <li>The working directory does not exist.
* </ul>
*
* <p>In such cases an exception will be thrown. The exact nature
* of the exception is system-dependent, but it will always be a
* subclass of {@link IOException}.
*
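 * <p>A minimal illustrative sketch of handling such failures
 * ({@code myCommand} is a placeholder):
 *
 * <pre> {@code
 * try {
 *     Process p = new ProcessBuilder("myCommand").start();
 * } catch (IOException e) {
 *     // e.g. program not found, access denied, or missing working directory
 * }
 * }</pre>
 *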
* <p>Subsequent modifications to this process builder will not
* affect the returned {@link Process}.
*
* @return a new {@link Process} object for managing the subprocess
*
* @throws NullPointerException
* if an element of the command list is null
*
* @throws IndexOutOfBoundsException
* if the command is an empty list (has size {@code 0})
*
* @throws SecurityException
* if a security manager exists and
* <ul>
*
* <li>its
* {@link SecurityManager#checkExec checkExec}
* method doesn't allow creation of the subprocess, or
*
* <li>the standard input to the subprocess was
* {@linkplain #redirectInput redirected from a file}
* and the security manager's
* {@link SecurityManager#checkRead checkRead} method
* denies read access to the file, or
*
* <li>the standard output or standard error of the
* subprocess was
* {@linkplain #redirectOutput redirected to a file}
* and the security manager's
* {@link SecurityManager#checkWrite checkWrite} method
* denies write access to the file
*
* </ul>
*
* @throws IOException if an I/O error occurs
*
* @see Runtime#exec(String[], String[], java.io.File)
*/
public Process start() throws IOException {
// Must convert to array first -- a malicious user-supplied
// list might try to circumvent the security check.
String[] cmdarray = command.toArray(new String[command.size()]);
cmdarray = cmdarray.clone();
for (String arg : cmdarray)
if (arg == null)
throw new NullPointerException();
// Throws IndexOutOfBoundsException if command is empty
String prog = cmdarray[0];
SecurityManager security = System.getSecurityManager();
if (security != null) {
security.checkExec(prog);
}
String dir = directory == null ? null : directory.toString();
try {
return ProcessImpl.start(cmdarray,
environment,
dir,
redirects,
redirectErrorStream);
} catch (IOException | IllegalArgumentException e) {
String exceptionInfo = ": " + e.getMessage();
Throwable cause = e;
if ((e instanceof IOException) && security != null) {
// Can not disclose the fail reason for read-protected files.
try {
security.checkRead(prog);
} catch (AccessControlException ace) {
exceptionInfo = "";
cause = ace;
}
}
// It's much easier for us to create a high-quality error
// message than the low-level C code which found the problem.
throw new IOException(
"Cannot run program \"" + prog + "\""
+ (dir == null ? "" : " (in directory \"" + dir + "\")")
+ exceptionInfo,
cause);
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 |
yolo.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run a YOLO_v3 style detection model on test images.
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from keras.utils import multi_gpu_model
gpu_num=1
class YOLO(object):
def __init__(self):
self.model_path = 'model_data/yolo.h5' # model path or trained weights path
self.anchors_path = 'model_data/yolo_anchors.txt'
self.classes_path = 'model_data/coco_classes.txt'
self.score = 0.3
self.iou = 0.45
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.model_image_size = (416, 416) # fixed size or (None, None), hw
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
        except Exception:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
        return_value, frame = vid.read()
        if not return_value:
            break
        image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
def detect_img(yolo):
pic_file = r'./source/images/dog.jpg'
image = Image.open(pic_file)
r_image = yolo.detect_image(image)
r_image.show()
yolo.close_session()
# while True:
# img = pic_file
# try:
# image = Image.open(img)
# except:
# print('Open Error! Try again!')
# continue
# else:
# r_image = yolo.detect_image(image)
# r_image.show()
# yolo.close_session()
if __name__ == '__main__':
detect_img(YOLO())
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
test/helpers/oc/runner.go
|
package oc
import (
"bytes"
"fmt"
"os"
osexec "os/exec"
"strings"
"time"
"github.com/openshift/cluster-logging-operator/pkg/logger"
)
// Runner is for executing the command. It provides implementation for
// the methods in oc.Command interface.
// Other commands like oc.Exec, oc.Get, oc.Literal collect their arguments
// and use Runner to run the command with arguments.
// It provides different modes of executing the commands, Run/RunFor/Output/OutputFor
//
// As far as possible, it is to be kept independent of oc command syntax.
// TODO(vimalk78)
// Move KUBECONFIG out from here
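//
// Illustrative sketch of a caller (hypothetical; the oc.Command builders that
// normally populate collectArgsFunc live elsewhere in this package):
//
//	r := &runner{}
//	r.collectArgsFunc = func() []string {
//		return sanitizeArgs("get pods -n openshift-logging")
//	}
//	out, err := r.Run() // executes: oc get pods -n openshift-logging
//	fmt.Println(out, err)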
// CMD is the command to be run by the runner
const CMD string = "oc"
// runner encapsulates os/exec/Cmd, collects args, and runs CMD
type runner struct {
*osexec.Cmd
args []string
configPath string
// This must be set by oc.Commands to collect arguments before calling Run
collectArgsFunc func() []string
tostdout bool
err error
}
func (r *runner) Run() (string, error) {
if r.err != nil {
return "composed command failed", r.err
}
r.setArgs(r.collectArgsFunc())
return r.runCmd()
}
func (r *runner) runCmd() (string, error) {
r.Cmd = osexec.Command(CMD, r.args...)
var outbuf bytes.Buffer
var errbuf bytes.Buffer
if r.tostdout {
r.Cmd.Stdout = os.Stdout
r.Cmd.Stderr = os.Stderr
} else {
r.Cmd.Stdout = &outbuf
r.Cmd.Stderr = &errbuf
}
r.Cmd.Env = []string{fmt.Sprintf("%s=%s", "KUBECONFIG", os.Getenv("KUBECONFIG"))}
logger.Infof("running: %s %s", r, strings.Join(r.args, " "))
err := r.Cmd.Run()
if err != nil {
if r.tostdout {
return "", err
}
errout := strings.TrimSpace(errbuf.String())
logger.Infof("output: %s, error: %v", errout, err)
return errout, err
}
if r.tostdout {
return "", nil
}
out := strings.TrimSpace(outbuf.String())
logger.Infof("output: %s", out)
return out, nil
}
func (r *runner) RunFor(d time.Duration) (string, error) {
time.AfterFunc(d, func() {
_ = r.Kill()
})
return r.Run()
}
func (r *runner) Kill() error {
if r.Process != nil {
return r.Process.Kill()
}
return nil
}
func (r *runner) Output() error {
r.tostdout = true
_, err := r.Run()
return err
}
func (r *runner) OutputFor(d time.Duration) error {
r.tostdout = true
_, err := r.RunFor(d)
return err
}
func (r *runner) String() string {
if r.configPath != "" {
return fmt.Sprintf("%s --config %s", CMD, r.configPath)
}
return CMD
}
func sanitizeArgStr(argstr string) string {
return strings.Join(sanitizeArgs(argstr), " ")
}
// sanitize the args, removes any unwanted spaces
func sanitizeArgs(argstr string) []string {
outargs := []string{}
args := strings.Split(argstr, " ")
for _, arg := range args {
arg = strings.TrimSpace(arg)
if arg != "" {
outargs = append(outargs, arg)
}
}
return outargs
}
func (r *runner) setArgs(args []string) {
r.args = args
}
func (r *runner) setArgsStr(argstr string) {
r.args = sanitizeArgs(argstr)
}
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 |
server.py
|
import os
import elasticsearch
from flask import Flask
from flask_cors import CORS
from apies import apies_blueprint
#BASE = 'http://pipelines/data/{}_in_es/datapackage.json'
BASE = 'http://api.yodaat.org/data/{}_in_es/datapackage.json'
ES_HOST = os.environ.get('ES_HOST', 'localhost')
ES_PORT = int(os.environ.get('ES_PORT', '9200'))
# INDEX_NAME = os.environ.get('INDEX_NAME', 'migdar')
def rules(field):
if field.get('es:title') or field.get('es:hebrew'):
if field.get('es:keyword'):
return [('exact', '^10')]
else:
return [('inexact', '^3'), ('natural', '.hebrew^10')]
elif field.get('es:boost'):
if field.get('es:keyword'):
return [('exact', '^10')]
else:
return [('inexact', '^10')]
elif field.get('es:keyword'):
return [('exact', '')]
else:
return [('inexact', '')]
TYPES = [
'publications', 'orgs', 'datasets',
]
app = Flask(__name__)
CORS(app)
blueprint = apies_blueprint(app,
[BASE.format(t) for t in TYPES],
elasticsearch.Elasticsearch([dict(host=ES_HOST, port=ES_PORT)], timeout=60),
dict(
(t, 'migdar__%s' % t)
for t in TYPES
),
'migdar__docs',
multi_match_type='best_fields',
multi_match_operator='and',
dont_highlight='*',
text_field_rules=rules,
debug_queries=True,
)
app.register_blueprint(blueprint, url_prefix='/')
if __name__ == '__main__':
app.run()
| [] | [] | ["ES_PORT", "ES_HOST", "INDEX_NAME"] | [] | ["ES_PORT", "ES_HOST", "INDEX_NAME"] | python | 3 | 0 |
fullApiExample.go
|
package main
import (
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/urfave/cli"
)
func init() {
cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n"
cli.CommandHelpTemplate += "\nYMMV\n"
cli.SubcommandHelpTemplate += "\nor something\n"
cli.HelpFlag = cli.BoolFlag{Name: "halp"}
cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true}
cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"}
cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
fmt.Fprintf(w, "best of luck to you\n")
}
cli.VersionPrinter = func(c *cli.Context) {
fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version)
}
cli.OsExiter = func(c int) {
fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c)
}
cli.ErrWriter = ioutil.Discard
cli.FlagStringer = func(fl cli.Flag) string {
return fmt.Sprintf("\t\t%s", fl.GetName())
}
}
type hexWriter struct{}
func (w *hexWriter) Write(p []byte) (int, error) {
for _, b := range p {
fmt.Printf("%x", b)
}
fmt.Printf("\n")
return len(p), nil
}
type genericType struct{
s string
}
func (g *genericType) Set(value string) error {
g.s = value
return nil
}
func (g *genericType) String() string {
return g.s
}
func fullApiExample() {
app := cli.NewApp()
app.Name = "kənˈtrīv"
app.Version = "19.99.0"
app.Compiled = time.Now()
app.Authors = []cli.Author{
cli.Author{
Name: "Example Human",
Email: "[email protected]",
},
}
app.Copyright = "(c) 1999 Serious Enterprise"
app.HelpName = "contrive"
app.Usage = "demonstrate available API"
app.UsageText = "contrive - demonstrating the available API"
app.ArgsUsage = "[args and such]"
app.Commands = []cli.Command{
cli.Command{
Name: "doo",
Aliases: []string{"do"},
Category: "motion",
Usage: "do the doo",
UsageText: "doo - does the dooing",
Description: "no really, there is a lot of dooing to be done",
ArgsUsage: "[arrgh]",
Flags: []cli.Flag{
cli.BoolFlag{Name: "forever, forevvarr"},
},
Subcommands: cli.Commands{
cli.Command{
Name: "wop",
Action: wopAction,
},
},
SkipFlagParsing: false,
HideHelp: false,
Hidden: false,
HelpName: "doo!",
BashComplete: func(c *cli.Context) {
fmt.Fprintf(c.App.Writer, "--better\n")
},
Before: func(c *cli.Context) error {
fmt.Fprintf(c.App.Writer, "brace for impact\n")
return nil
},
After: func(c *cli.Context) error {
fmt.Fprintf(c.App.Writer, "did we lose anyone?\n")
return nil
},
Action: func(c *cli.Context) error {
c.Command.FullName()
c.Command.HasName("wop")
c.Command.Names()
c.Command.VisibleFlags()
fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n")
if c.Bool("forever") {
c.Command.Run(c)
}
return nil
},
OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error {
fmt.Fprintf(c.App.Writer, "for shame\n")
return err
},
},
}
app.Flags = []cli.Flag{
cli.BoolFlag{Name: "fancy"},
cli.BoolTFlag{Name: "fancier"},
cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3},
cli.Float64Flag{Name: "howmuch"},
cli.GenericFlag{Name: "wat", Value: &genericType{}},
cli.Int64Flag{Name: "longdistance"},
cli.Int64SliceFlag{Name: "intervals"},
cli.IntFlag{Name: "distance"},
cli.IntSliceFlag{Name: "times"},
cli.StringFlag{Name: "dance-move, d"},
cli.StringSliceFlag{Name: "names, N"},
cli.UintFlag{Name: "age"},
cli.Uint64Flag{Name: "bigage"},
}
app.EnableBashCompletion = true
app.HideHelp = false
app.HideVersion = false
app.BashComplete = func(c *cli.Context) {
fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n")
}
app.Before = func(c *cli.Context) error {
fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n")
return nil
}
app.After = func(c *cli.Context) error {
fmt.Fprintf(c.App.Writer, "Phew!\n")
return nil
}
app.CommandNotFound = func(c *cli.Context, command string) {
fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command)
}
app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error {
if isSubcommand {
return err
}
fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err)
return nil
}
app.Action = func(c *cli.Context) error {
cli.DefaultAppComplete(c)
cli.HandleExitCoder(errors.New("not an exit coder, though"))
cli.ShowAppHelp(c)
cli.ShowCommandCompletions(c, "nope")
cli.ShowCommandHelp(c, "also-nope")
cli.ShowCompletions(c)
cli.ShowSubcommandHelp(c)
cli.ShowVersion(c)
categories := c.App.Categories()
categories.AddCommand("sounds", cli.Command{
Name: "bloop",
})
for _, category := range c.App.Categories() {
fmt.Fprintf(c.App.Writer, "%s\n", category.Name)
fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands)
fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands())
}
fmt.Printf("%#v\n", c.App.Command("doo"))
if c.Bool("infinite") {
c.App.Run([]string{"app", "doo", "wop"})
}
if c.Bool("forevar") {
c.App.RunAsSubcommand(c)
}
c.App.Setup()
fmt.Printf("%#v\n", c.App.VisibleCategories())
fmt.Printf("%#v\n", c.App.VisibleCommands())
fmt.Printf("%#v\n", c.App.VisibleFlags())
fmt.Printf("%#v\n", c.Args().First())
if len(c.Args()) > 0 {
fmt.Printf("%#v\n", c.Args()[1])
}
fmt.Printf("%#v\n", c.Args().Present())
fmt.Printf("%#v\n", c.Args().Tail())
set := flag.NewFlagSet("contrive", 0)
nc := cli.NewContext(c.App, set, c)
fmt.Printf("%#v\n", nc.Args())
fmt.Printf("%#v\n", nc.Bool("nope"))
fmt.Printf("%#v\n", nc.BoolT("nerp"))
fmt.Printf("%#v\n", nc.Duration("howlong"))
fmt.Printf("%#v\n", nc.Float64("hay"))
fmt.Printf("%#v\n", nc.Generic("bloop"))
fmt.Printf("%#v\n", nc.Int64("bonk"))
fmt.Printf("%#v\n", nc.Int64Slice("burnks"))
fmt.Printf("%#v\n", nc.Int("bips"))
fmt.Printf("%#v\n", nc.IntSlice("blups"))
fmt.Printf("%#v\n", nc.String("snurt"))
fmt.Printf("%#v\n", nc.StringSlice("snurkles"))
fmt.Printf("%#v\n", nc.Uint("flub"))
fmt.Printf("%#v\n", nc.Uint64("florb"))
fmt.Printf("%#v\n", nc.GlobalBool("global-nope"))
fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp"))
fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong"))
fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay"))
fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop"))
fmt.Printf("%#v\n", nc.GlobalInt("global-bips"))
fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups"))
fmt.Printf("%#v\n", nc.GlobalString("global-snurt"))
fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles"))
fmt.Printf("%#v\n", nc.FlagNames())
fmt.Printf("%#v\n", nc.GlobalFlagNames())
fmt.Printf("%#v\n", nc.GlobalIsSet("wat"))
fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope"))
fmt.Printf("%#v\n", nc.NArg())
fmt.Printf("%#v\n", nc.NumFlags())
fmt.Printf("%#v\n", nc.Parent())
nc.Set("wat", "also-nope")
ec := cli.NewExitError("ohwell", 86)
fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode())
fmt.Printf("made it!\n")
return nil
}
if os.Getenv("HEXY") != "" {
app.Writer = &hexWriter{}
app.ErrWriter = &hexWriter{}
}
app.Metadata = map[string]interface{}{
"layers": "many",
"explicable": false,
"whatever-values": 19.99,
}
// ignore error so we don't exit non-zero and break gfmrun README example tests
_ = app.Run(os.Args)
}
func wopAction(c *cli.Context) error {
fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n")
return nil
}
| ["\"HEXY\""] | [] | ["HEXY"] | [] | ["HEXY"] | go | 1 | 0 |
keras/esim_online.py
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import pandas as pd
import numpy as np
from tqdm import tqdm
import time
import logging
from sklearn.model_selection import StratifiedKFold
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras.optimizers import Adam
import pandas as pd
from sklearn.metrics import mean_absolute_error, accuracy_score, f1_score
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.activations import softmax
learning_rate = 5e-5
min_learning_rate = 1e-5
batch_size = 32
val_batch_size = 512
pred_batch_size = 512
percent_of_epoch = 0.25 * 0.05
num_epochs = int(7 // percent_of_epoch)  # Keras expects an integer epoch count
patience = 3
model_path = "./model/"
nfolds = 5
bert_path = "/home/mhxia/workspace/BDCI/chinese_wwm_ext_L-12_H-768_A-12/"
config_path = bert_path + 'bert_config.json'
checkpoint_path = bert_path + 'bert_model.ckpt'
dict_path = bert_path + 'vocab.txt'
MAX_LEN = 64
token_dict = {}
with open(dict_path, 'r', encoding='utf-8') as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
tokenizer = Tokenizer(token_dict)
train= pd.read_csv('./data/train_set.csv')
test=pd.read_csv('./data/dev_set.csv',sep='\t')
train_achievements = train['question1'].values
train_requirements = train['question2'].values
labels = train['label'].values
def label_process(x):
if x==0:
return [1,0]
else:
return [0,1]
train['label']=train['label'].apply(label_process)
labels_cat=list(train['label'].values)
labels_cat=np.array(labels_cat)
test_achievements = test['question1'].values
test_requirements = test['question2'].values
print(train.shape,test.shape)
class data_generator:
def __init__(self, data, batch_size=32):
self.data = data
self.batch_size = batch_size
self.steps = len(self.data[0]) // self.batch_size
if len(self.data[0]) % self.batch_size != 0:
self.steps += 1
def __len__(self):
return self.steps
def __iter__(self):
while True:
X1, X2, y = self.data
idxs = list(range(len(self.data[0])))
np.random.shuffle(idxs)
T, T_, Y = [], [], []
for c, i in enumerate(idxs):
achievements = X1[i]
requirements = X2[i]
t, t_ = tokenizer.encode(first=achievements, second=requirements, max_len=MAX_LEN)
T.append(t)
T_.append(t_)
Y.append(y[i])
if len(T) == self.batch_size or i == idxs[-1]:
T = np.array(T)
T_ = np.array(T_)
Y = np.array(Y)
yield [T, T_], Y
T, T_, Y = [], [], []
class test_data_generator:
def __init__(self, data, batch_size=32):
self.data = data
self.batch_size = batch_size
self.steps = len(self.data[0]) // self.batch_size
if len(self.data[0]) % self.batch_size != 0:
self.steps += 1
def __len__(self):
return self.steps
def __iter__(self):
while True:
X1, X2 = self.data
idxs = list(range(len(self.data[0])))
            # Keep the original order so predictions line up with the test dataframe rows.
T, T_ = [], []
for c, i in enumerate(idxs):
achievements = X1[i]
requirements = X2[i]
t, t_ = tokenizer.encode(first=achievements, second=requirements, max_len=MAX_LEN)
T.append(t)
T_.append(t_)
if len(T) == self.batch_size or i == idxs[-1]:
T = np.array(T)
T_ = np.array(T_)
yield [T, T_]
T, T_ = [], []
#####################################################
def apply_multiple(input_, layers):
if not len(layers) > 1:
raise ValueError('Layers list should contain more than 1 layer')
else:
agg_ = []
for layer in layers:
agg_.append(layer(input_))
out_ = Concatenate()(agg_)
return out_
def unchanged_shape(input_shape):
return input_shape
def substract(input_1, input_2):
neg_input_2 = Lambda(lambda x: -x, output_shape=unchanged_shape)(input_2)
out_ = Add()([input_1, neg_input_2])
return out_
def submult(input_1, input_2):
mult = Multiply()([input_1, input_2])
sub = substract(input_1, input_2)
out_ = Concatenate()([sub, mult])
return out_
def soft_attention_alignment(input_1, input_2):
attention = Dot(axes=-1)([input_1, input_2])
w_att_1 = Lambda(lambda x: softmax(x, axis=1), ##soft max to each column
output_shape=unchanged_shape)(attention)
w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2), ## axis =2 soft max to each row
output_shape=unchanged_shape)(attention))
in1_aligned = Dot(axes=1)([w_att_1, input_1])
in2_aligned = Dot(axes=1)([w_att_2, input_2])
return in1_aligned, in2_aligned
def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.):
y_pred = K.clip(y_pred, 1e-8, 1 - 1e-8)
return - alpha * y_true * K.log(y_pred) * (1 - y_pred)**gamma\
- (1 - alpha) * (1 - y_true) * K.log(1 - y_pred) * y_pred**gamma
def get_model():
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
# for l in bert_model.layers:
# l.trainable = True
T1 = Input(shape=(None,))
T2 = Input(shape=(None,))
tp1 = Lambda(lambda x: K.zeros_like(x))(T1)
tp2 = Lambda(lambda x: K.zeros_like(x))(T2)
x1 = bert_model([T1, tp1])
x2 = bert_model([T2, tp2])
X1 = Lambda(lambda x: x[:, 0:-1])(x1)
X2 = Lambda(lambda x: x[:, 0:-1])(x2)
encode = Bidirectional(LSTM(200, return_sequences=True))
q1_encoded = encode(X1)
q2_encoded = encode(X2)
q1_aligned, q2_aligned = soft_attention_alignment(q1_encoded, q2_encoded)
q1_combined = Concatenate()([q1_encoded, q2_aligned, submult(q1_encoded, q2_aligned)])
q2_combined = Concatenate()([q2_encoded, q1_aligned, submult(q2_encoded, q1_aligned)])
compose = Bidirectional(GRU(200, return_sequences=True))
q1_compare = compose(q1_combined)
q2_compare = compose(q2_combined)
# Aggregate
q1_rep = apply_multiple(q1_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
# Classifier
merged = Concatenate()([q1_rep, q2_rep])
dense = BatchNormalization()(merged)
dense = Dense(30, activation='selu')(dense)
dense = BatchNormalization()(dense)
output = Dense(2, activation='softmax')(dense)
model = Model([T1, T2], output)
model.compile(
# loss='categorical_crossentropy',
loss=focal_loss,
optimizer=Adam(1e-3), # 用足够小的学习率
metrics=['accuracy']
)
model.summary()
return model
skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=42)
oof_train = np.zeros((len(train), 2), dtype=np.float32)
oof_test = np.zeros((len(test), 2), dtype=np.float32)
for fold, (train_index, valid_index) in enumerate(skf.split(train_achievements, labels)):
x1 = train_achievements[train_index]
x2 = train_requirements[train_index]
y = labels_cat[train_index]
val_x1 = train_achievements[valid_index]
val_x2 = train_requirements[valid_index]
val_y = labels_cat[valid_index]
train_D = data_generator([x1, x2, y], batch_size)
val_D = data_generator([val_x1, val_x2, val_y], val_batch_size)
early_stopping = EarlyStopping(monitor='val_accuracy', patience=patience, verbose=1)
model_checkpoint = ModelCheckpoint(model_path+"model_%s.w"%fold, monitor='val_accuracy', verbose=1,save_best_only=True, save_weights_only=False, mode='auto')
model = get_model()
model.fit_generator(train_D.__iter__(),
steps_per_epoch=len(train_D) * percent_of_epoch,
epochs=num_epochs,
validation_data= val_D.__iter__(),
validation_steps = len(val_D) ,
verbose=1,
callbacks=[early_stopping, model_checkpoint]
)
# model.load_weights('bert{}.w'.format(fold))
pred_D = test_data_generator([test_achievements, test_requirements], pred_batch_size)
    oof_test += model.predict_generator(pred_D.__iter__(), verbose=1, steps=len(pred_D))
K.clear_session()
oof_test /= nfolds
test=pd.DataFrame(oof_test)
test.to_csv('test_pred.csv',index=False)
test.head(),test.shape
train=pd.DataFrame(oof_train)
train.to_csv('train_pred.csv',index=False)
pred=pd.read_csv('test_pred.csv').values
pred=pred.argmax(axis=1)
sub=pd.DataFrame()
sub['pred']=list(pred)
sub.to_csv('sub.csv',sep='\t',header=None)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
src/python/pants/bin/daemon_pants_runner.py
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import os
import sys
import termios
import time
from builtins import open, zip
from contextlib import contextmanager
from future.utils import PY3, raise_with_traceback
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.base.exiter import Exiter
from pants.bin.local_pants_runner import LocalPantsRunner
from pants.init.util import clean_global_runtime_state
from pants.java.nailgun_io import NailgunStreamStdinReader, NailgunStreamWriter
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
from pants.pantsd.process_manager import ProcessManager
from pants.rules.core.exceptions import GracefulTerminationException
from pants.util.contextutil import hermetic_environment_as, stdio_as
from pants.util.socket import teardown_socket
class DaemonExiter(Exiter):
"""An Exiter that emits unhandled tracebacks and exit codes via the Nailgun protocol."""
def __init__(self, socket):
# N.B. Assuming a fork()'d child, cause os._exit to be called here to avoid the routine
# sys.exit behavior.
# TODO: The behavior we're avoiding with the use of os._exit should be described and tested.
super(DaemonExiter, self).__init__(exiter=os._exit)
self._socket = socket
self._finalizer = None
def set_finalizer(self, finalizer):
"""Sets a finalizer that will be called before exiting."""
self._finalizer = finalizer
def exit(self, result=0, msg=None, *args, **kwargs):
"""Exit the runtime."""
if self._finalizer:
try:
self._finalizer()
except Exception as e:
try:
NailgunProtocol.send_stderr(
self._socket,
'\nUnexpected exception in finalizer: {!r}\n'.format(e)
)
except Exception:
pass
try:
# Write a final message to stderr if present.
if msg:
NailgunProtocol.send_stderr(self._socket, msg)
# Send an Exit chunk with the result.
NailgunProtocol.send_exit_with_code(self._socket, result)
# Shutdown the connected socket.
teardown_socket(self._socket)
finally:
super(DaemonExiter, self).exit(result=result, *args, **kwargs)
class DaemonPantsRunner(ProcessManager):
"""A daemonizing PantsRunner that speaks the nailgun protocol to a remote client.
N.B. this class is primarily used by the PailgunService in pantsd.
"""
@classmethod
def create(cls, sock, args, env, services, scheduler_service):
try:
# N.B. This will temporarily redirect stdio in the daemon's pre-fork context
# to the nailgun session. We'll later do this a second time post-fork, because
# threads.
with cls.nailgunned_stdio(sock, env, handle_stdin=False):
options, _, options_bootstrapper = LocalPantsRunner.parse_options(args, env)
subprocess_dir = options.for_global_scope().pants_subprocessdir
graph_helper, target_roots = scheduler_service.prefork(options, options_bootstrapper)
deferred_exc = None
except Exception:
deferred_exc = sys.exc_info()
graph_helper = None
target_roots = None
options_bootstrapper = None
# N.B. This will be overridden with the correct value if options
# parsing is successful - otherwise it permits us to run just far
# enough to raise the deferred exception.
subprocess_dir = os.path.join(get_buildroot(), '.pids')
return cls(
sock,
args,
env,
graph_helper,
target_roots,
services,
subprocess_dir,
options_bootstrapper,
deferred_exc
)
def __init__(self, socket, args, env, graph_helper, target_roots, services,
metadata_base_dir, options_bootstrapper, deferred_exc):
"""
:param socket socket: A connected socket capable of speaking the nailgun protocol.
:param list args: The arguments (i.e. sys.argv) for this run.
:param dict env: The environment (i.e. os.environ) for this run.
:param LegacyGraphSession graph_helper: The LegacyGraphSession instance to use for BuildGraph
construction. In the event of an exception, this will be
None.
:param TargetRoots target_roots: The `TargetRoots` for this run.
:param PantsServices services: The PantsServices that are currently running.
:param str metadata_base_dir: The ProcessManager metadata_base_dir from options.
:param OptionsBootstrapper options_bootstrapper: An OptionsBootstrapper to reuse.
:param Exception deferred_exception: A deferred exception from the daemon's pre-fork context.
If present, this will be re-raised in the client context.
"""
super(DaemonPantsRunner, self).__init__(
name=self._make_identity(),
metadata_base_dir=metadata_base_dir
)
self._socket = socket
self._args = args
self._env = env
self._graph_helper = graph_helper
self._target_roots = target_roots
self._services = services
self._options_bootstrapper = options_bootstrapper
self._deferred_exception = deferred_exc
self._exiter = DaemonExiter(socket)
def _make_identity(self):
"""Generate a ProcessManager identity for a given pants run.
This provides for a reasonably unique name e.g. 'pantsd-run-2015-09-16T23_17_56_581899'.
"""
return 'pantsd-run-{}'.format(datetime.datetime.now().strftime('%Y-%m-%dT%H_%M_%S_%f'))
@classmethod
@contextmanager
def _tty_stdio(cls, env):
"""Handles stdio redirection in the case of all stdio descriptors being the same tty."""
# If all stdio is a tty, there's only one logical I/O device (the tty device). This happens to
# be addressable as a file in OSX and Linux, so we take advantage of that and directly open the
# character device for output redirection - eliminating the need to directly marshall any
# interactive stdio back/forth across the socket and permitting full, correct tty control with
# no middle-man.
stdin_ttyname, stdout_ttyname, stderr_ttyname = NailgunProtocol.ttynames_from_env(env)
assert stdin_ttyname == stdout_ttyname == stderr_ttyname, (
'expected all stdio ttys to be the same, but instead got: {}\n'
'please file a bug at http://github.com/pantsbuild/pants'
.format([stdin_ttyname, stdout_ttyname, stderr_ttyname])
)
with open(stdin_ttyname, 'rb+', 0) as tty:
tty_fileno = tty.fileno()
with stdio_as(stdin_fd=tty_fileno, stdout_fd=tty_fileno, stderr_fd=tty_fileno):
def finalizer():
termios.tcdrain(tty_fileno)
yield finalizer
@classmethod
@contextmanager
def _pipe_stdio(cls, sock, stdin_isatty, stdout_isatty, stderr_isatty, handle_stdin):
"""Handles stdio redirection in the case of pipes and/or mixed pipes and ttys."""
stdio_writers = (
(ChunkType.STDOUT, stdout_isatty),
(ChunkType.STDERR, stderr_isatty)
)
types, ttys = zip(*(stdio_writers))
@contextmanager
def maybe_handle_stdin(want):
if want:
# TODO: Launching this thread pre-fork to handle @rule input currently results
# in an unhandled SIGILL in `src/python/pants/engine/scheduler.py, line 313 in pre_fork`.
# More work to be done here in https://github.com/pantsbuild/pants/issues/6005
with NailgunStreamStdinReader.open(sock, stdin_isatty) as fd:
yield fd
else:
with open('/dev/null', 'rb') as fh:
yield fh.fileno()
with maybe_handle_stdin(handle_stdin) as stdin_fd,\
NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer),\
stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
# N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
# exit chunk, to avoid any socket shutdown vs write races.
stdout, stderr = sys.stdout, sys.stderr
def finalizer():
try:
stdout.flush()
stderr.flush()
finally:
time.sleep(.001) # HACK: Sleep 1ms in the main thread to free the GIL.
writer.stop()
writer.join()
stdout.close()
stderr.close()
yield finalizer
@classmethod
@contextmanager
def nailgunned_stdio(cls, sock, env, handle_stdin=True):
"""Redirects stdio to the connected socket speaking the nailgun protocol."""
# Determine output tty capabilities from the environment.
stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(env)
is_tty_capable = all((stdin_isatty, stdout_isatty, stderr_isatty))
if is_tty_capable:
with cls._tty_stdio(env) as finalizer:
yield finalizer
else:
with cls._pipe_stdio(
sock,
stdin_isatty,
stdout_isatty,
stderr_isatty,
handle_stdin
) as finalizer:
yield finalizer
# TODO: there's no testing for this method, and this caused a user-visible failure -- see #7008!
def _raise_deferred_exc(self):
"""Raises deferred exceptions from the daemon's synchronous path in the post-fork client."""
if self._deferred_exception:
try:
exc_type, exc_value, exc_traceback = self._deferred_exception
raise_with_traceback(exc_value, exc_traceback)
except TypeError:
# If `_deferred_exception` isn't a 3-item tuple (raising a TypeError on the above
# destructuring), treat it like a bare exception.
raise self._deferred_exception
def _maybe_get_client_start_time_from_env(self, env):
client_start_time = env.pop('PANTSD_RUNTRACKER_CLIENT_START_TIME', None)
return None if client_start_time is None else float(client_start_time)
def run(self):
"""Fork, daemonize and invoke self.post_fork_child() (via ProcessManager).
The scheduler has thread pools which need to be re-initialized after a fork: this ensures that
when the pantsd-runner forks from pantsd, there is a working pool for any work that happens
in that child process.
"""
fork_context = self._graph_helper.scheduler_session.with_fork_context if self._graph_helper else None
self.daemonize(write_pid=False, fork_context=fork_context)
def pre_fork(self):
# Mark all services pausing (to allow them to concurrently pause), and then wait for them
# to have paused.
# NB: PailgunServer ensures that the entire run occurs under the lifecycle_lock.
for service in self._services.services:
service.mark_pausing()
for service in self._services.services:
service.await_paused()
def post_fork_parent(self):
# NB: PailgunServer ensures that the entire run occurs under the lifecycle_lock.
for service in self._services.services:
service.resume()
def post_fork_child(self):
"""Post-fork child process callback executed via ProcessManager.daemonize()."""
# Set the Exiter exception hook post-fork so as not to affect the pantsd process's exception
# hook with socket-specific behavior. Note that this intentionally points the faulthandler
# trace stream to sys.stderr, which at this point is still a _LoggerStream object writing to
# the `pantsd.log`. This ensures that in the event of e.g. a hung but detached pantsd-runner
# process that the stacktrace output lands deterministically in a known place vs to a stray
# terminal window.
# TODO: test the above!
ExceptionSink.reset_exiter(self._exiter)
ExceptionSink.reset_interactive_output_stream(sys.stderr.buffer if PY3 else sys.stderr)
# Ensure anything referencing sys.argv inherits the Pailgun'd args.
sys.argv = self._args
# Set context in the process title.
set_process_title('pantsd-runner [{}]'.format(' '.join(self._args)))
# Broadcast our process group ID (in PID form - i.e. negated) to the remote client so
# they can send signals (e.g. SIGINT) to all processes in the runners process group.
NailgunProtocol.send_pid(self._socket, os.getpid())
NailgunProtocol.send_pgrp(self._socket, os.getpgrp() * -1)
# Stop the services that were paused pre-fork.
for service in self._services.services:
service.terminate()
# Invoke a Pants run with stdio redirected and a proxied environment.
with self.nailgunned_stdio(self._socket, self._env) as finalizer,\
hermetic_environment_as(**self._env):
try:
# Setup the Exiter's finalizer.
self._exiter.set_finalizer(finalizer)
# Clean global state.
clean_global_runtime_state(reset_subsystem=True)
# Re-raise any deferred exceptions, if present.
self._raise_deferred_exc()
# Otherwise, conduct a normal run.
runner = LocalPantsRunner.create(
self._exiter,
self._args,
self._env,
self._target_roots,
self._graph_helper,
self._options_bootstrapper
)
runner.set_start_time(self._maybe_get_client_start_time_from_env(self._env))
runner.run()
except KeyboardInterrupt:
self._exiter.exit_and_fail('Interrupted by user.\n')
except GracefulTerminationException as e:
ExceptionSink.log_exception(
'Encountered graceful termination exception {}; exiting'.format(e))
self._exiter.exit(e.exit_code)
except Exception:
ExceptionSink._log_unhandled_exception_and_exit()
else:
self._exiter.exit(0)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
appengine/standard/pubsub/main.py
|
# Copyright 2018 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import base64
import json
import logging
import os
from flask import current_app, Flask, render_template, request
from googleapiclient.discovery import build
app = Flask(__name__)
# Configure the following environment variables via app.yaml
# The verification token is used in the push request handler to verify that the
# request came from Pub/Sub and originated from a trusted source.
app.config['PUBSUB_VERIFICATION_TOKEN'] = \
os.environ['PUBSUB_VERIFICATION_TOKEN']
app.config['PUBSUB_TOPIC'] = os.environ['PUBSUB_TOPIC']
app.config['GCLOUD_PROJECT'] = os.environ['GOOGLE_CLOUD_PROJECT']
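# A minimal app.yaml sketch for wiring these variables (the values below are
# illustrative placeholders, not taken from this repository):
#
#   env_variables:
#     PUBSUB_TOPIC: your-topic
#     PUBSUB_VERIFICATION_TOKEN: your-verification-token
#
# GOOGLE_CLOUD_PROJECT is typically supplied by the runtime environment.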
# Global list to store messages received by this instance.
MESSAGES = []
# [START index]
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html', messages=MESSAGES)
data = request.form.get('payload', 'Example payload').encode('utf-8')
service = build('pubsub', 'v1')
topic_path = 'projects/{project_id}/topics/{topic}'.format(
project_id=app.config['GCLOUD_PROJECT'],
topic=app.config['PUBSUB_TOPIC']
)
service.projects().topics().publish(
topic=topic_path, body={
"messages": [{
"data": base64.b64encode(data)
}]
}).execute()
return 'OK', 200
# [END index]
# [START push]
@app.route('/_ah/push-handlers/receive_messages', methods=['POST'])
def receive_messages_handler():
if (request.args.get('token', '') !=
current_app.config['PUBSUB_VERIFICATION_TOKEN']):
return 'Invalid request', 400
envelope = json.loads(request.get_data().decode('utf-8'))
payload = base64.b64decode(envelope['message']['data'])
MESSAGES.append(payload)
# Returning any 2xx status indicates successful receipt of the message.
return 'OK', 200
# [END push]
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END app]
|
[] |
[] |
[
"GOOGLE_CLOUD_PROJECT",
"PUBSUB_VERIFICATION_TOKEN",
"PUBSUB_TOPIC"
] |
[]
|
["GOOGLE_CLOUD_PROJECT", "PUBSUB_VERIFICATION_TOKEN", "PUBSUB_TOPIC"]
|
python
| 3 | 0 | |
shop/handlers/shop_handlers.go
|
package handlers
import (
"encoding/json"
"fmt"
"net/http"
"sync"
"github.com/go-chi/chi/v5"
"github.com/naim6246/grpc-GO/param"
"github.com/naim6246/grpc-GO/shop/models"
"github.com/naim6246/grpc-GO/shop/services"
)
var Wg sync.WaitGroup
type ShopHandler struct {
shopService *services.ShopService
}
func NewShopHandler(shopService *services.ShopService) *ShopHandler {
return &ShopHandler{
shopService: shopService,
}
}
func (h *ShopHandler) Handler() {
router := chi.NewRouter()
router.Route("/shop", func(router chi.Router) {
router.Get("/", h.getAllShop)
router.Post("/", h.createShop)
router.Route("/{shopId}", func(r chi.Router) {
router.Get("/", h.getShopById)
router.Get("/details", h.getShopDetails)
router.Get("/products",h.getShopProducts)
})
})
fmt.Println("serving api server on port: 8083")
http.ListenAndServe(":8083", router)
Wg.Done()
}
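// Example requests against the routes registered above (host/port are illustrative):
//
//   GET  http://localhost:8083/shop            -> getAllShop
//   POST http://localhost:8083/shop            -> createShop
//   GET  http://localhost:8083/shop/1/details  -> getShopDetails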
func (h *ShopHandler) createShop(w http.ResponseWriter, r *http.Request) {
var shop models.Shop
if err := json.NewDecoder(r.Body).Decode(&shop); err != nil {
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(err)
return
}
createdShop, err := h.shopService.Create(&shop)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(err)
return
}
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(createdShop)
}
func (h *ShopHandler) getShopById(w http.ResponseWriter, r *http.Request) {
id := param.Int(r, "shopId")
shop, err := h.shopService.GetShopByID(int32(id))
if err != nil {
w.WriteHeader(http.StatusNotFound)
json.NewEncoder(w).Encode(err)
return
}
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(shop)
}
func (h *ShopHandler) getAllShop(w http.ResponseWriter, r *http.Request) {
shops, err := h.shopService.GetAllShops()
if err != nil {
w.WriteHeader(http.StatusNotFound)
json.NewEncoder(w).Encode(err)
return
}
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(shops)
}
func (h *ShopHandler) getShopDetails(w http.ResponseWriter, r *http.Request) {
id := param.Int(r, "shopId")
shop, err := h.shopService.GetShopDetails(int32(id), r.Context())
if err != nil {
w.WriteHeader(http.StatusNotFound)
json.NewEncoder(w).Encode(err)
return
}
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(shop)
}
func (h *ShopHandler) getShopProducts(w http.ResponseWriter, r *http.Request) {
shopId := param.Int(r, "shopId")
products, err := h.shopService.GetShopProduts(shopId)
if err != nil {
w.WriteHeader(http.StatusNotFound)
json.NewEncoder(w).Encode(err)
return
}
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(products)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
codes/src/holding/EnvironmentVariables.java
|
package holding;
import java.util.Map;
/**
* <pre>
* author : wangzhichao
* e-mail : [email protected]
* time : 2019/08/18
* desc :
* version: 1.0
* </pre>
*/
public class EnvironmentVariables {
public static void main(String[] args) {
for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
System.out.println(entry.getKey() + ": " + entry.getValue());
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
cmd/manager/main.go
|
package main
import (
"context"
"flag"
"fmt"
"os"
"runtime"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
"github.com/redhat-developer/service-binding-operator/pkg/apis"
"github.com/redhat-developer/service-binding-operator/pkg/controller"
"github.com/redhat-developer/service-binding-operator/pkg/log"
)
// Change below variables to serve metrics on different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
mainLog = log.NewLog("main")
)
func printVersion() {
mainLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
mainLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
mainLog.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
}
// getOperatorName returns the value of the OPERATOR_NAME environment variable, or the default
// name for the operator.
func getOperatorName() string {
envName := os.Getenv("OPERATOR_NAME")
if envName != "" {
return envName
}
return "service-binding-operator"
}
// isLeaderElectionEnabled based on environment variable SERVICE_BINDING_OPERATOR_DISABLE_ELECTION. By default, it is enabled.
func isLeaderElectionEnabled() bool {
return os.Getenv("SERVICE_BINDING_OPERATOR_DISABLE_ELECTION") == ""
}
// isLeaderWithLeaseEnabled based on environment variable SERVICE_BINDING_OPERATOR_LEADER_ELECTION_OPTION. By default, it is leader-for-life.
func isLeaderWithLeaseEnabled() bool {
return os.Getenv("SERVICE_BINDING_OPERATOR_LEADER_ELECTION_OPTION") == "leader-with-lease"
}
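// Illustrative settings (not taken from this repository) for opting into lease-based
// election when deploying the operator:
//
//   SERVICE_BINDING_OPERATOR_LEADER_ELECTION_OPTION=leader-with-lease
//   SERVICE_BINDING_OPERATOR_LEADER_ELECTION_NAMESPACE=<namespace holding the lease>
//
// Leaving SERVICE_BINDING_OPERATOR_DISABLE_ELECTION unset keeps leader election enabled.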
func main() {
pflag.CommandLine.AddFlagSet(zap.FlagSet())
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
log.SetLog(zap.Logger())
printVersion()
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
mainLog.Error(err, "Failed to get watch namespace")
os.Exit(1)
}
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
mainLog.Error(err, "Failed to acquire a configuration to talk to the API server")
os.Exit(1)
}
ctx := context.TODO()
opts := manager.Options{
Namespace: namespace,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
}
// FIXME: is there a way to tell k8s-client that is not running in-cluster?
if isLeaderElectionEnabled() {
if !isLeaderWithLeaseEnabled() {
// Become the leader before proceeding
err = leader.Become(ctx, fmt.Sprintf("%s-lock", getOperatorName()))
if err != nil {
mainLog.Error(err, "Failed to become the leader")
os.Exit(1)
}
} else {
leaderNS := os.Getenv("SERVICE_BINDING_OPERATOR_LEADER_ELECTION_NAMESPACE")
opts = manager.Options{
Namespace: namespace,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
LeaderElection: true,
LeaderElectionID: getOperatorName(),
LeaderElectionNamespace: leaderNS,
}
}
} else {
mainLog.Warning("Leader election is disabled")
}
// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, opts)
if err != nil {
mainLog.Error(err, "Error on creating a new manager instance")
os.Exit(1)
}
mainLog.Info("Registering Components.")
// Setup Scheme for all resources
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
mainLog.Error(err, "Error adding local operator scheme")
os.Exit(1)
}
// Setup all Controllers
if err := controller.AddToManager(mgr); err != nil {
mainLog.Error(err, "Failed to setup the controller manager")
os.Exit(1)
}
if err = serveCRMetrics(cfg); err != nil {
mainLog.Info("Could not generate and serve custom resource metrics", "error", err.Error())
}
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []v1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
mainLog.Info("Could not create metrics Service", "error", err.Error())
}
// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*v1.Service{service}
_, err = metrics.CreateServiceMonitors(cfg, namespace, services)
if err != nil {
mainLog.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
mainLog.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}
mainLog.Info("Starting the Cmd.")
// Start the Cmd
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
mainLog.Error(err, "Manager exited non-zero")
os.Exit(1)
}
}
// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
// It serves those metrics on "http://metricsHost:operatorMetricsPort".
func serveCRMetrics(cfg *rest.Config) error {
// Below function returns filtered operator/CustomResource specific GVKs.
// For more control override the below GVK list with your own custom logic.
filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
if err != nil {
return err
}
// Get the namespace the operator is currently deployed in.
operatorNs, err := k8sutil.GetOperatorNamespace()
if err != nil {
return err
}
// To generate metrics in other namespaces, add the values below.
ns := []string{operatorNs}
// Generate and serve custom resource specific metrics.
err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
if err != nil {
return err
}
return nil
}
|
[
"\"OPERATOR_NAME\"",
"\"SERVICE_BINDING_OPERATOR_DISABLE_ELECTION\"",
"\"SERVICE_BINDING_OPERATOR_LEADER_ELECTION_OPTION\"",
"\"SERVICE_BINDING_OPERATOR_LEADER_ELECTION_NAMESPACE\""
] |
[] |
[
"SERVICE_BINDING_OPERATOR_LEADER_ELECTION_NAMESPACE",
"OPERATOR_NAME",
"SERVICE_BINDING_OPERATOR_LEADER_ELECTION_OPTION",
"SERVICE_BINDING_OPERATOR_DISABLE_ELECTION"
] |
[]
|
["SERVICE_BINDING_OPERATOR_LEADER_ELECTION_NAMESPACE", "OPERATOR_NAME", "SERVICE_BINDING_OPERATOR_LEADER_ELECTION_OPTION", "SERVICE_BINDING_OPERATOR_DISABLE_ELECTION"]
|
go
| 4 | 0 | |
test/test_dump_sql.py
|
import json
import os
import subprocess
from file import temp_file
from pg import connection, transaction
from process import run_process
def test_dump_sql(pg_database, snapshot):
schema_sql = """
CREATE TABLE parent (
id int PRIMARY KEY
);
CREATE TABLE child (
id int PRIMARY KEY,
parent_id int REFERENCES parent (id)
);
"""
with temp_file("schema-") as schema_file, temp_file("output-") as output_file:
with connection("") as conn, transaction(conn) as cur:
cur.execute(schema_sql)
cur.execute(
"""
INSERT INTO parent (id)
VALUES (1), (2);
INSERT INTO child (id, parent_id)
VALUES (1, 1), (2, 1), (3, 2);
"""
)
with open(schema_file, "w") as f:
schema_json = {
"references": {
"public.child.child_parent_id_fkey": {
"columns": ["parent_id"],
"referenceColumns": ["id"],
"referenceTable": "public.parent",
"table": "public.child",
}
},
"sequences": {},
"tables": {
"public.parent": {
"columns": ["id"],
"name": "parent",
"schema": "public",
"sequences": [],
},
"public.child": {
"columns": ["id", "parent_id"],
"name": "child",
"schema": "public",
"sequences": [],
},
},
}
json.dump(schema_json, f)
run_process(
[
"slicedb",
"dump",
"--include-schema",
"--schema",
schema_file,
"--root",
"public.parent",
"id = 1",
"--output",
output_file,
"--output-type",
"sql",
]
)
with connection("") as conn, transaction(conn) as cur:
cur.execute(
"""
DROP TABLE child;
DROP TABLE parent;
"""
)
with open(output_file) as f:
print(f.read())
run_process(
[
"psql",
"-f",
output_file,
],
env=dict(**os.environ, ON_ERROR_STOP="1"),
)
with connection("") as conn, transaction(conn) as cur:
cur.execute("TABLE parent")
result = cur.fetchall()
assert result == [(1,)]
cur.execute("TABLE child")
result = cur.fetchall()
assert result == [(1, 1), (2, 1)]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/unit/flow/test_flow_multimode.py
|
import os
from typing import List, Dict
import pytest
import numpy as np
from jina.executors.segmenters import BaseSegmenter
from jina.executors.encoders import BaseEncoder
from jina.executors.indexers.keyvalue import BinaryPbIndexer
from jina.executors.decorators import single
from jina.flow import Flow
from jina.proto import jina_pb2
cur_dir = os.path.dirname(os.path.abspath(__file__))
class MockSegmenter(BaseSegmenter):
@single
def segment(self, text: str, *args, **kwargs) -> List[Dict]:
split = text.split(',')
chunks = [
dict(text=split[0], offset=0, weight=1.0, modality='mode1'),
dict(text=split[1], offset=1, weight=1.0, modality='mode2'),
]
return chunks
class MockEncoder(BaseEncoder):
def encode(self, content: 'np.ndarray', *args, **kwargs) -> 'np.ndarray':
output = []
for r in content:
if "mode1" in r:
output.append([0.0, 0.0, 0.0])
elif "mode2" in r:
output.append([1.0, 1.0, 1.0])
return np.array(output)
@pytest.mark.parametrize('restful', [False, True])
def test_flow_with_modalities(tmpdir, restful):
os.environ['JINA_TEST_FLOW_MULTIMODE_WORKSPACE'] = str(tmpdir)
def input_function():
doc1 = jina_pb2.DocumentProto()
doc1.text = 'title: this is mode1 from doc1, body: this is mode2 from doc1'
doc1.id = '1'
doc2 = jina_pb2.DocumentProto()
doc2.text = 'title: this is mode1 from doc2, body: this is mode2 from doc2'
doc2.id = '2'
doc3 = jina_pb2.DocumentProto()
doc3.text = 'title: this is mode1 from doc3, body: this is mode2 from doc3'
doc3.id = '3'
return [doc1, doc2, doc3]
flow = (
Flow(restful=restful)
.add(name='segmenter', uses='!MockSegmenter')
.add(name='encoder1', uses=os.path.join(cur_dir, 'yaml/mockencoder-mode1.yml'))
.add(
name='indexer1',
uses=os.path.join(cur_dir, 'yaml/numpy-indexer-1.yml'),
needs=['encoder1'],
)
.add(
name='encoder2',
uses=os.path.join(cur_dir, 'yaml/mockencoder-mode2.yml'),
needs=['segmenter'],
)
.add(name='indexer2', uses=os.path.join(cur_dir, 'yaml/numpy-indexer-2.yml'))
.join(['indexer1', 'indexer2'])
)
with flow:
flow.index(inputs=input_function)
with open(os.path.join(tmpdir, 'compound', 'vecidx1-0', 'vec1.gz'), 'rb') as fp:
result = np.frombuffer(fp.read(), dtype='float').reshape([-1, 3])
np.testing.assert_equal(
result, np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
)
with open(os.path.join(tmpdir, 'compound', 'vecidx2-0', 'vec2.gz'), 'rb') as fp:
result = np.frombuffer(fp.read(), dtype='float').reshape([-1, 3])
np.testing.assert_equal(
result, np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
)
chunkIndexer1 = BinaryPbIndexer.load(
os.path.join(tmpdir, 'compound', 'kvidx1-0', 'kvidx1.bin')
)
assert chunkIndexer1.size == 3
d_id = list(chunkIndexer1.query_handler.header.keys())[0]
query_doc = jina_pb2.DocumentProto()
query_doc.ParseFromString(chunkIndexer1.query([d_id])[0])
assert query_doc.text == 'title: this is mode1 from doc1'
assert query_doc.modality == 'mode1'
chunkIndexer2 = BinaryPbIndexer.load(
os.path.join(tmpdir, 'compound', 'kvidx2-0', 'kvidx2.bin')
)
assert chunkIndexer2.size == 3
d_id = list(chunkIndexer2.query_handler.header.keys())[0]
query_doc = jina_pb2.DocumentProto()
query_doc.ParseFromString(chunkIndexer2.query([d_id])[0])
assert query_doc.text == ' body: this is mode2 from doc1'
assert query_doc.modality == 'mode2'
del os.environ['JINA_TEST_FLOW_MULTIMODE_WORKSPACE']
|
[] |
[] |
[
"JINA_TEST_FLOW_MULTIMODE_WORKSPACE"
] |
[]
|
["JINA_TEST_FLOW_MULTIMODE_WORKSPACE"]
|
python
| 1 | 0 | |
server/metadata_controller.go
|
package server
import (
"fmt"
"os"
"sync"
"time"
"github.com/rancher/log"
"github.com/rancher/rancher-metadata/config"
uuid "github.com/satori/go.uuid"
)
type MetadataController struct {
metadataServers map[string]*MetadataServer
versions config.Versions
version string
sync.Mutex
versionCond *sync.Cond
subscribe bool
answersFileNamePrefix string
reloadInterval int64
}
func NewMetadataController(subscribe bool, answersFileNamePrefix string, reloadInterval int64) *MetadataController {
return &MetadataController{
versions: (config.Versions)(nil),
version: "0",
subscribe: subscribe,
answersFileNamePrefix: answersFileNamePrefix,
reloadInterval: reloadInterval,
}
}
func (mc *MetadataController) Start() error {
//register default metadata server
mc.RegisterMetaDataServer(os.Getenv("CATTLE_URL"),
os.Getenv("CATTLE_ACCESS_KEY"),
os.Getenv("CATTLE_SECRET_KEY"), true, false)
mc.versionCond = sync.NewCond(mc)
if err := mc.LoadVersionsFromFile(); err != nil {
return err
}
go func() {
for {
time.Sleep(5 * time.Second)
mc.versionCond.Broadcast()
}
}()
if mc.subscribe {
for _, m := range mc.metadataServers {
if err := m.Start(); err != nil {
return err
}
}
}
return nil
}
func (mc *MetadataController) LoadVersionsFromFile() error {
for _, m := range mc.metadataServers {
err := m.loadVersionsFromFile()
if err != nil {
return fmt.Errorf("Failed to load answers from file: %v", err)
}
}
mc.reloadVersions()
return nil
}
func (mc *MetadataController) resetVersion() {
mc.version = uuid.NewV4().String()
}
func (mc *MetadataController) mergeVersions() config.Versions {
var external []config.Versions
var local config.Versions
for _, m := range mc.metadataServers {
if m.local {
local = m.GetVersions()
} else {
external = append(external, m.GetVersions())
}
}
return config.MergeVersions(local, external, mc.version)
}
func (mc *MetadataController) GetVersions() config.Versions {
mc.Lock()
defer mc.Unlock()
return mc.versions
}
func (mc *MetadataController) RegisterMetaDataServer(url string, accessKey string, secretKey string, local bool, subscribe bool) error {
create := false
if mc.metadataServers == nil {
mc.metadataServers = make(map[string]*MetadataServer)
create = true
} else {
existing, ok := mc.metadataServers[accessKey]
if !ok {
create = true
} else if existing.accessKey != accessKey {
create = true
}
}
if !create {
return nil
}
log.Infof("Registering metadata server [%s] with url [%s]", accessKey, url)
m := NewMetaDataServer(url,
accessKey, secretKey, local, mc.answersFileNamePrefix, mc.reloadInterval, mc.reloadVersions)
if subscribe && mc.subscribe {
if err := m.Start(); err != nil {
return fmt.Errorf("Failed to register metadata server [%s] with url [%s]: [%v]", accessKey, url, err)
}
}
mc.metadataServers[accessKey] = m
log.Infof("Registered metadata server for [%s] with url [%s]", accessKey, url)
return nil
}
func (mc *MetadataController) UnregisterMetaDataServer(UUID string) {
if _, ok := mc.metadataServers[UUID]; !ok {
return
}
log.Infof("Deregestring metadata server [%s]", UUID)
if mc.subscribe {
mc.metadataServers[UUID].Stop()
}
delete(mc.metadataServers, UUID)
log.Infof("Deregistered metadata server [%s]", UUID)
}
func (mc *MetadataController) getExternalCredentials() []config.Credential {
for _, s := range mc.metadataServers {
if s.local {
return s.GetExternalCredentials()
}
}
return []config.Credential{}
}
func (mc *MetadataController) reloadVersions() {
mc.Lock()
defer mc.Unlock()
creds := mc.getExternalCredentials()
// sync subscribers here
toAdd := make(map[string]config.Credential)
for _, cred := range creds {
toAdd[cred.PublicValue] = cred
}
toRemove := []string{}
for key, server := range mc.metadataServers {
if server.local {
continue
}
if val, ok := toAdd[key]; !ok {
toRemove = append(toRemove, server.accessKey)
} else if server.URL != val.URL {
toRemove = append(toRemove, server.accessKey)
}
}
// 1. Deregister obsolete subscribers
for _, UUID := range toRemove {
mc.UnregisterMetaDataServer(UUID)
}
// 2. Merge versions
mc.versions = mc.mergeVersions()
mc.resetVersion()
// 3. Register new subscribers
for _, cred := range toAdd {
err := mc.RegisterMetaDataServer(cred.URL, cred.PublicValue, cred.SecretValue, false, true)
if err != nil {
log.Error(err)
}
}
mc.versionCond.Broadcast()
}
func (mc *MetadataController) LookupAnswer(wait bool, oldValue, version string, ip string, path []string, maxWait time.Duration) (interface{}, bool) {
if !wait {
v := mc.GetVersions()
return v.Matching(version, ip, path)
}
if maxWait == time.Duration(0) {
maxWait = time.Minute
}
if maxWait > 2*time.Minute {
maxWait = 2 * time.Minute
}
start := time.Now()
for {
v := mc.GetVersions()
val, ok := v.Matching(version, ip, path)
if time.Now().Sub(start) > maxWait {
return val, ok
}
if ok && fmt.Sprint(val) != oldValue {
return val, ok
}
mc.versionCond.L.Lock()
mc.versionCond.Wait()
mc.versionCond.L.Unlock()
}
}
|
[
"\"CATTLE_URL\"",
"\"CATTLE_ACCESS_KEY\"",
"\"CATTLE_SECRET_KEY\""
] |
[] |
[
"CATTLE_SECRET_KEY",
"CATTLE_URL",
"CATTLE_ACCESS_KEY"
] |
[]
|
["CATTLE_SECRET_KEY", "CATTLE_URL", "CATTLE_ACCESS_KEY"]
|
go
| 3 | 0 | |
pkg/repository/redis/redis_test.go
|
package redis_test
import (
"log"
"os"
"testing"
"time"
"github.com/joshturge-io/auth/pkg/repository"
"github.com/joshturge-io/auth/pkg/repository/redis"
"github.com/joshturge-io/auth/pkg/token"
)
var (
repo repository.Repository
testUser map[string]string
testBlacklist []string
)
func init() {
var err error
repo, err = redis.NewRepository(log.New(os.Stdout, "", 0), os.Getenv("REDIS_ADDR"),
os.Getenv("REDIS_PSWD"), 3*time.Minute)
if err != nil {
panic(err)
}
testUser = map[string]string{
"salt": "H4jk53hGsk3fj4Dfsj3",
"hash": "dd373f6f7e9338d82a5ccab1be65475c06e97fed63cd59b892024a0a120aa6f0",
"refresh": "Uq_XJB5p5clZ_lAjFVND0oTYT9uFe8plBfGHFGMZ4RI=",
}
testBlacklist = []string{}
}
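// Illustrative invocation (address and password are placeholders, not from this repository):
//
//   REDIS_ADDR=localhost:6379 REDIS_PSWD=secret go test ./pkg/repository/redis/...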
func TestSetRefreshToken(t *testing.T) {
if err := repo.SetRefreshToken("test_user", testUser["refresh"], 3*time.Minute); err != nil {
t.Error(err)
}
}
func TestGetRefreshToken(t *testing.T) {
token, err := repo.GetRefreshToken("test_user")
if err != nil {
t.Error(err)
}
if token != testUser["refresh"] {
t.Errorf("token does not match the one set wanted: %s got: %s\n", testUser["refresh"], token)
}
}
func TestSetSalt(t *testing.T) {
if err := repo.SetSalt("test_user", testUser["salt"]); err != nil {
t.Error(err)
}
}
func TestGetSalt(t *testing.T) {
salt, err := repo.GetSalt("test_user")
if err != nil {
t.Error(err)
}
if salt != testUser["salt"] {
t.Errorf("salt does not match the one set wanted: %s got: %s", testUser["salt"], salt)
}
}
func TestSetHash(t *testing.T) {
// Assumes the repository interface exposes SetHash/GetHash alongside SetSalt/GetSalt.
if err := repo.SetHash("test_user", testUser["hash"]); err != nil {
t.Error(err)
}
}
func TestGetHash(t *testing.T) {
hash, err := repo.GetHash("test_user")
if err != nil {
t.Error(err)
}
if hash != testUser["hash"] {
t.Errorf("hash does not match the one set wanted: %s got: %s", testUser["hash"], hash)
}
}
func TestSetBlacklist(t *testing.T) {
jw := token.NewJW("secret", "test_user", 3*time.Minute)
if err := jw.Generate(); err != nil {
t.Error(err)
}
testBlacklist = append(testBlacklist, jw.Token())
if err := repo.SetBlacklist(jw.Token(), 3*time.Minute); err != nil {
t.Error(err)
}
}
func TestIsBlacklisted(t *testing.T) {
blacklisted, err := repo.IsBlacklisted(testBlacklist[0])
if err != nil {
t.Error(err)
}
if !blacklisted {
t.Error("token was not blacklisted")
}
}
|
[
"\"REDIS_ADDR\"",
"\"REDIS_PSWD\""
] |
[] |
[
"REDIS_PSWD",
"REDIS_ADDR"
] |
[]
|
["REDIS_PSWD", "REDIS_ADDR"]
|
go
| 2 | 0 | |
storage_drivers/ontap/api/rest/models/app_nfs_access.go
|
// Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// AppNfsAccess The list of NFS access controls. You must provide either 'host' or 'access' to enable NFS access.
//
// swagger:model app_nfs_access
type AppNfsAccess struct {
// The NFS access granted.
// Enum: [none ro rw]
Access *string `json:"access,omitempty"`
// The name of the NFS entity granted access.
Host *string `json:"host,omitempty"`
}
// Validate validates this app nfs access
func (m *AppNfsAccess) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAccess(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var appNfsAccessTypeAccessPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["none","ro","rw"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
appNfsAccessTypeAccessPropEnum = append(appNfsAccessTypeAccessPropEnum, v)
}
}
const (
// BEGIN DEBUGGING
// app_nfs_access
// AppNfsAccess
// access
// Access
// none
// END DEBUGGING
// AppNfsAccessAccessNone captures enum value "none"
AppNfsAccessAccessNone string = "none"
// BEGIN DEBUGGING
// app_nfs_access
// AppNfsAccess
// access
// Access
// ro
// END DEBUGGING
// AppNfsAccessAccessRo captures enum value "ro"
AppNfsAccessAccessRo string = "ro"
// BEGIN DEBUGGING
// app_nfs_access
// AppNfsAccess
// access
// Access
// rw
// END DEBUGGING
// AppNfsAccessAccessRw captures enum value "rw"
AppNfsAccessAccessRw string = "rw"
)
// prop value enum
func (m *AppNfsAccess) validateAccessEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, appNfsAccessTypeAccessPropEnum, true); err != nil {
return err
}
return nil
}
func (m *AppNfsAccess) validateAccess(formats strfmt.Registry) error {
if swag.IsZero(m.Access) { // not required
return nil
}
// value enum
if err := m.validateAccessEnum("access", "body", *m.Access); err != nil {
return err
}
return nil
}
// ContextValidate validates this app nfs access based on context it is used
func (m *AppNfsAccess) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *AppNfsAccess) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *AppNfsAccess) UnmarshalBinary(b []byte) error {
var res AppNfsAccess
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
test/extended/prometheus/prometheus.go
|
package prometheus
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
v1 "k8s.io/api/core/v1"
kapierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
kapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
admissionapi "k8s.io/pod-security-admission/api"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/origin/pkg/synthetictests/allowedalerts"
testresult "github.com/openshift/origin/pkg/test/ginkgo/result"
"github.com/openshift/origin/test/extended/networking"
exutil "github.com/openshift/origin/test/extended/util"
helper "github.com/openshift/origin/test/extended/util/prometheus"
)
var _ = g.Describe("[sig-instrumentation][Late] OpenShift alerting rules", func() {
defer g.GinkgoRecover()
// These alerts are known to be missing the summary and/or description
// annotations. Bugzillas have been filed, and are linked here. These
// should be fixed one-by-one and removed from this list.
descriptionExceptions := sets.NewString(
// Repo: openshift/cluster-kube-apiserver-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010349
"APIRemovedInNextEUSReleaseInUse",
"APIRemovedInNextReleaseInUse",
"ExtremelyHighIndividualControlPlaneCPU",
"HighOverallControlPlaneCPU",
"TechPreviewNoUpgrade",
// Repo: operator-framework/operator-marketplace
// https://bugzilla.redhat.com/show_bug.cgi?id=2010375
"CertifiedOperatorsCatalogError",
"CommunityOperatorsCatalogError",
"RedhatMarketplaceCatalogError",
"RedhatOperatorsCatalogError",
// Repo: openshift/cloud-credential-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010341
"CloudCredentialOperatorDeprovisioningFailed",
"CloudCredentialOperatorInsufficientCloudCreds",
"CloudCredentialOperatorProvisioningFailed",
"CloudCredentialOperatorTargetNamespaceMissing",
"CloudCredentialOperatorStaleCredentials",
// Repo: operator-framework/operator-lifecycle-manager
// https://bugzilla.redhat.com/show_bug.cgi?id=2010373
"CsvAbnormalFailedOver2Min",
"CsvAbnormalOver30Min",
"InstallPlanStepAppliedWithWarnings",
// Repo: openshift/cluster-ingress-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010376
"HAProxyDown",
"HAProxyReloadFail",
"IngressControllerDegraded",
"IngressControllerUnavailable",
// Repo: openshift/cluster-image-registry-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010347
// https://bugzilla.redhat.com/show_bug.cgi?id=1992553
"ImageRegistryStorageReconfigured",
// Repo: openshift/cluster-kube-scheduler-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010354
"KubeSchedulerDown",
"SchedulerLegacyPolicySet",
// Repo: openshift/machine-config-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010371
"KubeletHealthState",
"MCDDrainError",
"MCDPivotError",
"MCDRebootError",
"MasterNodesHighMemoryUsage",
"SystemMemoryExceedsReservation",
// Repo: openshift/machine-api-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010368
"MachineAPIOperatorMetricsCollectionFailing",
"MachineHealthCheckUnterminatedShortCircuit",
"MachineNotYetDeleted",
"MachineWithNoRunningPhase",
"MachineWithoutValidNode",
// Repo: openshift/cluster-machine-approver
//https://bugzilla.redhat.com/show_bug.cgi?id=2010359
"MachineApproverMaxPendingCSRsReached",
// Repo: openshift/cluster-kube-controller-manager-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010352
"KubeControllerManagerDown",
"PodDisruptionBudgetAtLimit",
"PodDisruptionBudgetLimit",
// Repo: openshift/cluster-samples-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010364
"SamplesDegraded",
"SamplesImagestreamImportFailing",
"SamplesInvalidConfig",
"SamplesMissingSecret",
"SamplesMissingTBRCredential",
"SamplesRetriesMissingOnImagestreamImportFailing",
"SamplesTBRInaccessibleOnBoot",
// Repo: openshift/cluster-etcd-operator
// https://bugzilla.redhat.com/show_bug.cgi?id=2010346
"etcdBackendQuotaLowSpace",
"etcdExcessiveDatabaseGrowth",
"etcdHighFsyncDurations",
// Repo: openshift/cluster-storage-operator (vSphere)
// https://bugzilla.redhat.com/show_bug.cgi?id=2010310
// https://github.com/openshift/cluster-storage-operator/pull/220
"VSphereOpenshiftClusterHealthFail",
"VSphereOpenshiftNodeHealthFail",
)
var alertingRules map[string][]promv1.AlertingRule
oc := exutil.NewCLIWithoutNamespace("prometheus")
g.BeforeEach(func() {
err := exutil.WaitForAnImageStream(
oc.AdminImageClient().ImageV1().ImageStreams("openshift"), "tools",
exutil.CheckImageStreamLatestTagPopulated, exutil.CheckImageStreamTagNotFound)
o.Expect(err).NotTo(o.HaveOccurred())
url, _, bearerToken, ok := helper.LocatePrometheus(oc)
if !ok {
e2e.Failf("Prometheus could not be located on this cluster, failing prometheus test")
}
if alertingRules == nil {
var err error
alertingRules, err = helper.FetchAlertingRules(oc, url, bearerToken)
if err != nil {
e2e.Failf("Failed to fetch alerting rules: %v", err)
}
}
})
g.It("should have a valid severity label", func() {
err := helper.ForEachAlertingRule(alertingRules, func(alert promv1.AlertingRule) sets.String {
severityRe := regexp.MustCompile("^critical|warning|info$")
severity, found := alert.Labels["severity"]
if !found {
return sets.NewString("has no 'severity' label")
}
if !severityRe.MatchString(string(severity)) {
return sets.NewString(
fmt.Sprintf("has a 'severity' label value of %q which doesn't match %q",
severity, severityRe.String(),
),
)
}
return nil
})
if err != nil {
e2e.Failf(err.Error())
}
})
g.It("should have description and summary annotations", func() {
err := helper.ForEachAlertingRule(alertingRules, func(alert promv1.AlertingRule) sets.String {
if descriptionExceptions.Has(alert.Name) {
framework.Logf("Alerting rule %q is known to have missing annotations.", alert.Name)
return nil
}
violations := sets.NewString()
if _, found := alert.Annotations["description"]; !found {
// If there's no 'description' annotation, but there is a
// 'message' annotation, suggest renaming it.
if _, found := alert.Annotations["message"]; found {
violations.Insert("has no 'description' annotation, but has a 'message' annotation." +
" OpenShift alerts must use 'description' -- consider renaming the annotation")
} else {
violations.Insert("has no 'description' annotation")
}
}
if _, found := alert.Annotations["summary"]; !found {
violations.Insert("has no 'summary' annotation")
}
return violations
})
if err != nil {
// We are still gathering data on how many alerts need to
// be fixed, so this is marked as a flake for now.
testresult.Flakef(err.Error())
}
})
g.It("should have a runbook_url annotation if the alert is critical", func() {
err := helper.ForEachAlertingRule(alertingRules, func(alert promv1.AlertingRule) sets.String {
violations := sets.NewString()
severity := string(alert.Labels["severity"])
runbook := string(alert.Annotations["runbook_url"])
if severity == "critical" && runbook == "" {
violations.Insert(
fmt.Sprintf("WARNING: Alert %q is critical and has no 'runbook_url' annotation", alert.Name),
)
} else if runbook != "" {
// If there's a 'runbook_url' annotation, make sure it's a
// valid URL and that we can fetch the contents.
if err := helper.ValidateURL(runbook, 10*time.Second); err != nil {
violations.Insert(
fmt.Sprintf("WARNING: Alert %q has an invalid 'runbook_url' annotation: %v",
alert.Name, err),
)
}
}
return violations
})
if err != nil {
// We are still gathering data on how many alerts need to
// be fixed, so this is marked as a flake for now.
testresult.Flakef(err.Error())
}
})
})
var _ = g.Describe("[sig-instrumentation][Late] Alerts", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIWithoutNamespace("prometheus")
)
g.It("shouldn't report any unexpected alerts in firing or pending state", func() {
// Watchdog and AlertmanagerReceiversNotConfigured are expected.
if len(os.Getenv("TEST_UNSUPPORTED_ALLOW_VERSION_SKEW")) > 0 {
e2eskipper.Skipf("Test is disabled to allow cluster components to have different versions, and skewed versions trigger multiple other alerts")
}
firingAlertsWithBugs := helper.MetricConditions{
{
Selector: map[string]string{"alertname": "ClusterOperatorDown", "name": "authentication"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1939580",
},
{
Selector: map[string]string{"alertname": "ClusterOperatorDegraded", "name": "authentication"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1939580",
},
{
Selector: map[string]string{"alertname": "AggregatedAPIDown", "name": "v1alpha1.wardle.example.com"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1933144",
},
{
Selector: map[string]string{"alertname": "KubeAggregatedAPIDown", "name": "v1alpha1.wardle.example.com"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1933144",
},
{
Selector: map[string]string{"alertname": "KubeAPIErrorBudgetBurn"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1953798",
Matches: func(_ *model.Sample) bool {
return framework.ProviderIs("gce")
},
},
{
Selector: map[string]string{"alertname": "HighlyAvailableWorkloadIncorrectlySpread", "namespace": "openshift-monitoring", "workload": "prometheus-k8s"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1949262",
},
{
Selector: map[string]string{"alertname": "HighlyAvailableWorkloadIncorrectlySpread", "namespace": "openshift-monitoring", "workload": "alertmanager-main"},
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=1955489",
},
{
Selector: map[string]string{"alertname": "KubeJobFailed", "namespace": "openshift-multus"}, // not sure how to do a job_name prefix
Text: "https://bugzilla.redhat.com/show_bug.cgi?id=2054426",
},
}
allowedFiringAlerts := helper.MetricConditions{
{
Selector: map[string]string{"alertname": "TargetDown", "namespace": "openshift-e2e-loki"},
Text: "Loki is nice to have, but we can allow it to be down",
},
{
Selector: map[string]string{"alertname": "KubePodNotReady", "namespace": "openshift-e2e-loki"},
Text: "Loki is nice to have, but we can allow it to be down",
},
{
Selector: map[string]string{"alertname": "KubeDeploymentReplicasMismatch", "namespace": "openshift-e2e-loki"},
Text: "Loki is nice to have, but we can allow it to be down",
},
{
Selector: map[string]string{"alertname": "HighOverallControlPlaneCPU"},
Text: "high CPU utilization during e2e runs is normal",
},
{
Selector: map[string]string{"alertname": "ExtremelyHighIndividualControlPlaneCPU"},
Text: "high CPU utilization during e2e runs is normal",
},
}
if isTechPreviewCluster(oc) {
allowedFiringAlerts = append(
allowedFiringAlerts,
helper.MetricCondition{
Selector: map[string]string{"alertname": "TechPreviewNoUpgrade"},
Text: "Allow testing of TechPreviewNoUpgrade clusters, this will only fire when a FeatureGate has been installed",
},
helper.MetricCondition{
Selector: map[string]string{"alertname": "ClusterNotUpgradeable"},
Text: "Allow testing of ClusterNotUpgradeable clusters, this will only fire when a FeatureGate has been installed",
})
}
pendingAlertsWithBugs := helper.MetricConditions{}
allowedPendingAlerts := helper.MetricConditions{
{
Selector: map[string]string{"alertname": "HighOverallControlPlaneCPU"},
Text: "high CPU utilization during e2e runs is normal",
},
{
Selector: map[string]string{"alertname": "ExtremelyHighIndividualControlPlaneCPU"},
Text: "high CPU utilization during e2e runs is normal",
},
}
// we exclude alerts that have their own separate tests.
for _, alertTest := range allowedalerts.AllAlertTests(context.TODO(), nil, 0) {
switch alertTest.AlertState() {
case allowedalerts.AlertPending:
// a pending test covers pending and everything above (firing)
allowedPendingAlerts = append(allowedPendingAlerts,
helper.MetricCondition{
Selector: map[string]string{"alertname": alertTest.AlertName()},
Text: "has a separate e2e test",
},
)
allowedFiringAlerts = append(allowedFiringAlerts,
helper.MetricCondition{
Selector: map[string]string{"alertname": alertTest.AlertName()},
Text: "has a separate e2e test",
},
)
case allowedalerts.AlertInfo:
// an info test covers all firing
allowedFiringAlerts = append(allowedFiringAlerts,
helper.MetricCondition{
Selector: map[string]string{"alertname": alertTest.AlertName()},
Text: "has a separate e2e test",
},
)
}
}
knownViolations := sets.NewString()
unexpectedViolations := sets.NewString()
unexpectedViolationsAsFlakes := sets.NewString()
debug := sets.NewString()
// we only consider samples since the beginning of the test
testDuration := exutil.DurationSinceStartInSeconds().String()
// Invariant: No non-info level alerts should have fired during the test run
firingAlertQuery := fmt.Sprintf(`
sort_desc(
count_over_time(ALERTS{alertstate="firing",severity!="info",alertname!~"Watchdog|AlertmanagerReceiversNotConfigured"}[%[1]s:1s])
) > 0
`, testDuration)
result, err := helper.RunQuery(context.TODO(), oc.NewPrometheusClient(context.TODO()), firingAlertQuery)
o.Expect(err).NotTo(o.HaveOccurred(), "unable to check firing alerts during test")
for _, series := range result.Data.Result {
labels := helper.StripLabels(series.Metric, "alertname", "alertstate", "prometheus")
violation := fmt.Sprintf("alert %s fired for %s seconds with labels: %s", series.Metric["alertname"], series.Value, helper.LabelsAsSelector(labels))
if cause := allowedFiringAlerts.Matches(series); cause != nil {
debug.Insert(fmt.Sprintf("%s (allowed: %s)", violation, cause.Text))
continue
}
if cause := firingAlertsWithBugs.Matches(series); cause != nil {
knownViolations.Insert(fmt.Sprintf("%s (open bug: %s)", violation, cause.Text))
} else {
unexpectedViolations.Insert(violation)
}
}
// Invariant: There should be no pending alerts after the test run
pendingAlertQuery := fmt.Sprintf(`
sort_desc(
time() * ALERTS + 1
-
last_over_time((
time() * ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured",alertstate="pending",severity!="info"}
unless
ALERTS offset 1s
)[%[1]s:1s])
)
`, testDuration)
result, err = helper.RunQuery(context.TODO(), oc.NewPrometheusClient(context.TODO()), pendingAlertQuery)
o.Expect(err).NotTo(o.HaveOccurred(), "unable to retrieve pending alerts after upgrade")
for _, series := range result.Data.Result {
labels := helper.StripLabels(series.Metric, "alertname", "alertstate", "prometheus")
violation := fmt.Sprintf("alert %s pending for %s seconds with labels: %s", series.Metric["alertname"], series.Value, helper.LabelsAsSelector(labels))
if cause := allowedPendingAlerts.Matches(series); cause != nil {
debug.Insert(fmt.Sprintf("%s (allowed: %s)", violation, cause.Text))
continue
}
if cause := pendingAlertsWithBugs.Matches(series); cause != nil {
knownViolations.Insert(fmt.Sprintf("%s (open bug: %s)", violation, cause.Text))
} else {
// treat pending errors as a flake right now because we are still trying to determine the scope
// TODO: move this to unexpectedViolations later
unexpectedViolationsAsFlakes.Insert(violation)
}
}
if len(debug) > 0 {
framework.Logf("Alerts were detected during test run which are allowed:\n\n%s", strings.Join(debug.List(), "\n"))
}
if len(unexpectedViolations) > 0 {
framework.Failf("Unexpected alerts fired or pending after the test run:\n\n%s", strings.Join(unexpectedViolations.List(), "\n"))
}
if flakes := sets.NewString().Union(knownViolations).Union(unexpectedViolations).Union(unexpectedViolationsAsFlakes); len(flakes) > 0 {
testresult.Flakef("Unexpected alert behavior during test:\n\n%s", strings.Join(flakes.List(), "\n"))
}
framework.Logf("No alerts fired during test run")
})
g.It("shouldn't exceed the 650 series limit of total series sent via telemetry from each cluster", func() {
if !hasPullSecret(oc.AdminKubeClient(), "cloud.openshift.com") {
e2eskipper.Skipf("Telemetry is disabled")
}
// we only consider series sent since the beginning of the test
testDuration := exutil.DurationSinceStartInSeconds().String()
tests := map[string]bool{
// We want to limit the number of total series sent, the cluster:telemetry_selected_series:count
// rule contains the count of the all the series that are sent via telemetry. It is permissible
// for some scenarios to generate more series than 650, we just want the basic state to be below
// a threshold.
//
// The following query can be executed against the telemetry server
// to reevaluate the threshold value (replace the matcher on the version label accordingly):
//
// quantile(0.99,
// avg_over_time(
// (
// cluster:telemetry_selected_series:count
// *
// on (_id) group_left group by(_id) (cluster_version{version=~"4.11.0-0.ci.+"})
// )[30m:1m]
// )
// )
fmt.Sprintf(`avg_over_time(cluster:telemetry_selected_series:count[%s]) >= 650`, testDuration): false,
fmt.Sprintf(`max_over_time(cluster:telemetry_selected_series:count[%s]) >= 1200`, testDuration): false,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Total number of series sent via telemetry is below the limit")
})
})
var _ = g.Describe("[sig-instrumentation] Prometheus", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIWithPodSecurityLevel("prometheus", admissionapi.LevelBaseline)
url, prometheusURL, bearerToken string
)
g.BeforeEach(func() {
err := exutil.WaitForAnImageStream(
oc.AdminImageClient().ImageV1().ImageStreams("openshift"), "tools",
exutil.CheckImageStreamLatestTagPopulated, exutil.CheckImageStreamTagNotFound)
o.Expect(err).NotTo(o.HaveOccurred())
var ok bool
url, prometheusURL, bearerToken, ok = helper.LocatePrometheus(oc)
if !ok {
e2e.Failf("Prometheus could not be located on this cluster, failing prometheus test")
}
})
g.Describe("when installed on the cluster", func() {
g.It("should report telemetry if a cloud.openshift.com token is present [Late]", func() {
if !hasPullSecret(oc.AdminKubeClient(), "cloud.openshift.com") {
e2eskipper.Skipf("Telemetry is disabled")
}
tests := map[string]bool{}
if hasTelemeterClient(oc.AdminKubeClient()) {
e2e.Logf("Found telemeter-client pod")
tests = map[string]bool{
// should have successfully sent at least once to remote
`metricsclient_request_send{client="federate_to",job="telemeter-client",status_code="200"} >= 1`: true,
// should have scraped some metrics from prometheus
`federate_samples{job="telemeter-client"} >= 10`: true,
}
} else {
e2e.Logf("Found no telemeter-client pod, assuming prometheus remote_write")
tests = map[string]bool{
// Should have successfully sent at least some metrics to
// remote write endpoint
`prometheus_remote_storage_succeeded_samples_total{job="prometheus-k8s"} >= 1`: true,
}
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Telemetry is enabled: %s", bearerToken)
})
g.It("should start and expose a secured proxy and unsecured metrics", func() {
ns := oc.Namespace()
execPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod")
defer func() {
oc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPod.Name, *metav1.NewDeleteOptions(1))
}()
g.By("checking the prometheus metrics path")
var metrics map[string]*dto.MetricFamily
o.Expect(wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
results, err := getBearerTokenURLViaPod(ns, execPod.Name, fmt.Sprintf("%s/metrics", prometheusURL), bearerToken)
if err != nil {
e2e.Logf("unable to get metrics: %v", err)
return false, nil
}
p := expfmt.TextParser{}
metrics, err = p.TextToMetricFamilies(bytes.NewBufferString(results))
o.Expect(err).NotTo(o.HaveOccurred())
// original field in 2.0.0-beta
counts := findCountersWithLabels(metrics["tsdb_samples_appended_total"], labels{})
if len(counts) != 0 && counts[0] > 0 {
return true, nil
}
// 2.0.0-rc.0
counts = findCountersWithLabels(metrics["tsdb_head_samples_appended_total"], labels{})
if len(counts) != 0 && counts[0] > 0 {
return true, nil
}
// 2.0.0-rc.2
counts = findCountersWithLabels(metrics["prometheus_tsdb_head_samples_appended_total"], labels{})
if len(counts) != 0 && counts[0] > 0 {
return true, nil
}
return false, nil
})).NotTo(o.HaveOccurred(), fmt.Sprintf("Did not find tsdb_samples_appended_total, tsdb_head_samples_appended_total, or prometheus_tsdb_head_samples_appended_total"))
g.By("verifying the Thanos querier service requires authentication")
err := helper.ExpectURLStatusCodeExec(ns, execPod.Name, url, 401, 403)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("verifying a service account token is able to authenticate")
err = expectBearerTokenURLStatusCodeExec(ns, execPod.Name, fmt.Sprintf("%s/api/v1/targets", url), bearerToken, 200)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("verifying a service account token is able to access the Prometheus API")
// expect all endpoints within 60 seconds
var lastErrs []error
o.Expect(wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
contents, err := getBearerTokenURLViaPod(ns, execPod.Name, fmt.Sprintf("%s/api/v1/targets", prometheusURL), bearerToken)
o.Expect(err).NotTo(o.HaveOccurred())
targets := &prometheusTargets{}
err = json.Unmarshal([]byte(contents), targets)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("verifying all expected jobs have a working target")
controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
o.Expect(err).NotTo(o.HaveOccurred())
// For External clusters, skip control plane components and the CVO
if *controlPlaneTopology != configv1.ExternalTopologyMode {
lastErrs = all(
// The OpenShift control plane
targets.Expect(labels{"job": "api"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "controller-manager"}, "up", "^https://.*/metrics$"),
// The kube control plane
// TODO restore this after etcd operator lands
//targets.Expect(labels{"job": "etcd"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "apiserver"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "kube-controller-manager"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "scheduler"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "kube-state-metrics"}, "up", "^https://.*/metrics$"),
// Cluster version operator
targets.Expect(labels{"job": "cluster-version-operator"}, "up", "^https://.*/metrics$"),
)
}
lastErrs = append(lastErrs, all(
targets.Expect(labels{"job": "prometheus-k8s", "namespace": "openshift-monitoring", "pod": "prometheus-k8s-0"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "kubelet"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "kubelet"}, "up", "^https://.*/metrics/cadvisor$"),
targets.Expect(labels{"job": "node-exporter"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "prometheus-operator"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "alertmanager-main"}, "up", "^https://.*/metrics$"),
targets.Expect(labels{"job": "crio"}, "up", "^http://.*/metrics$"),
)...)
if len(lastErrs) > 0 {
e2e.Logf("missing some targets: %v", lastErrs)
return false, nil
}
return true, nil
})).NotTo(o.HaveOccurred(), "possibly some services didn't register ServiceMonitors to allow metrics collection")
g.By("verifying all targets are exposing metrics over secure channel")
var insecureTargets []error
contents, err := getBearerTokenURLViaPod(ns, execPod.Name, fmt.Sprintf("%s/api/v1/targets", prometheusURL), bearerToken)
o.Expect(err).NotTo(o.HaveOccurred())
targets := &prometheusTargets{}
err = json.Unmarshal([]byte(contents), targets)
o.Expect(err).NotTo(o.HaveOccurred())
// Currently following targets do not secure their /metrics endpoints:
// job="crio" - https://issues.redhat.com/browse/MON-1034 + https://issues.redhat.com/browse/OCPNODE-321
// job="ovnkube-master" - https://issues.redhat.com/browse/SDN-912
// job="ovnkube-node" - https://issues.redhat.com/browse/SDN-912
// Exclude list should be reduced to 0
exclude := map[string]bool{
"crio": true,
"ovnkube-master": true,
"ovnkube-node": true,
}
pattern := regexp.MustCompile("^https://.*")
for _, t := range targets.Data.ActiveTargets {
if exclude[t.Labels["job"]] {
continue
}
if !pattern.MatchString(t.ScrapeUrl) {
msg := fmt.Errorf("following target does not secure metrics endpoint: %v", t.Labels["job"])
insecureTargets = append(insecureTargets, msg)
}
}
o.Expect(insecureTargets).To(o.BeEmpty(), "some services expose metrics over insecure channel")
})
g.It("should have a AlertmanagerReceiversNotConfigured alert in firing state", func() {
tests := map[string]bool{
`ALERTS{alertstate=~"firing|pending",alertname="AlertmanagerReceiversNotConfigured"} == 1`: true,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("AlertmanagerReceiversNotConfigured alert is firing")
})
g.It("should have important platform topology metrics", func() {
exutil.SkipIfExternalControlplaneTopology(oc, "topology metrics are not available for clusters with external controlPlaneTopology")
tests := map[string]bool{
// track infrastructure type
`cluster_infrastructure_provider{type!=""}`: true,
`cluster_feature_set`: true,
// track installer type
`cluster_installer{type!="",invoker!=""}`: true,
// track sum of etcd
`instance:etcd_object_counts:sum > 0`: true,
// track cores and sockets across node types
`sum(node_role_os_version_machine:cpu_capacity_cores:sum{label_kubernetes_io_arch!="",label_node_role_kubernetes_io_master!=""}) > 0`: true,
`sum(node_role_os_version_machine:cpu_capacity_sockets:sum{label_kubernetes_io_arch!="",label_node_hyperthread_enabled!="",label_node_role_kubernetes_io_master!=""}) > 0`: true,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("should have non-Pod host cAdvisor metrics", func() {
tests := map[string]bool{
`container_cpu_usage_seconds_total{id!~"/kubepods.slice/.*"} >= 1`: true,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("shouldn't have failing rules evaluation", func() {
// we only consider samples since the beginning of the test
testDuration := exutil.DurationSinceStartInSeconds().String()
tests := map[string]bool{
fmt.Sprintf(`increase(prometheus_rule_evaluation_failures_total[%s]) >= 1`, testDuration): false,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
networking.InOpenShiftSDNContext(func() {
g.It("should be able to get the sdn ovs flows", func() {
tests := map[string]bool{
//something
`openshift_sdn_ovs_flows >= 1`: true,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
})
g.It("shouldn't report any alerts in firing state apart from Watchdog and AlertmanagerReceiversNotConfigured [Early]", func() {
if len(os.Getenv("TEST_UNSUPPORTED_ALLOW_VERSION_SKEW")) > 0 {
e2eskipper.Skipf("Test is disabled to allow cluster components to have different versions, and skewed versions trigger multiple other alerts")
}
// Checking Watchdog alert state is done in "should have a Watchdog alert in firing state".
allowedAlertNames := []string{
"Watchdog",
"AlertmanagerReceiversNotConfigured",
"PrometheusRemoteWriteDesiredShards",
"KubeJobFailed", // this is a result of bug https://bugzilla.redhat.com/show_bug.cgi?id=2054426 . We should catch these in the late test above.
}
// we exclude alerts that have their own separate tests.
for _, alertTest := range allowedalerts.AllAlertTests(context.TODO(), nil, 0) {
allowedAlertNames = append(allowedAlertNames, alertTest.AlertName())
}
if isTechPreviewCluster(oc) {
// On a TechPreviewNoUpgrade cluster we must ignore the TechPreviewNoUpgrade and ClusterNotUpgradeable alerts generated by the CVO.
// These two alerts are expected in this case when a cluster is configured to enable Tech Preview features,
// as they were intended to be "gentle reminders" to the cluster admins of the ramifications of enabling Tech Preview
allowedAlertNames = append(allowedAlertNames, "TechPreviewNoUpgrade", "ClusterNotUpgradeable")
}
tests := map[string]bool{
fmt.Sprintf(`ALERTS{alertname!~"%s",alertstate="firing",severity!="info"} >= 1`, strings.Join(allowedAlertNames, "|")): false,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), tests, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("should provide ingress metrics", func() {
ns := oc.SetupNamespace()
execPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod")
defer func() {
oc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPod.Name, *metav1.NewDeleteOptions(1))
}()
var lastErrs []error
o.Expect(wait.PollImmediate(10*time.Second, 4*time.Minute, func() (bool, error) {
contents, err := getBearerTokenURLViaPod(ns, execPod.Name, fmt.Sprintf("%s/api/v1/targets", prometheusURL), bearerToken)
o.Expect(err).NotTo(o.HaveOccurred())
targets := &prometheusTargets{}
err = json.Unmarshal([]byte(contents), targets)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("verifying all expected jobs have a working target")
lastErrs = all(
// Is there a good way to discover the name and thereby avoid leaking the naming algorithm?
targets.Expect(labels{"job": "router-internal-default"}, "up", "^https://.*/metrics$"),
)
if len(lastErrs) > 0 {
e2e.Logf("missing some targets: %v", lastErrs)
return false, nil
}
return true, nil
})).NotTo(o.HaveOccurred(), "ingress router cannot report metrics to monitoring system")
g.By("verifying standard metrics keys")
queries := map[string]bool{
`template_router_reload_seconds_count{job="router-internal-default"} >= 1`: true,
`haproxy_server_up{job="router-internal-default"} >= 1`: true,
}
err := helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), queries, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("should provide named network metrics", func() {
ns := oc.SetupNamespace()
cs, err := newDynClientSet()
o.Expect(err).NotTo(o.HaveOccurred())
err = addNetwork(cs, "secondary", ns)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := removeNetwork(cs, "secondary", ns)
o.Expect(err).NotTo(o.HaveOccurred())
}()
execPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod", func(pod *v1.Pod) {
pod.Annotations = map[string]string{
"k8s.v1.cni.cncf.io/networks": "secondary",
}
})
defer func() {
oc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPod.Name, *metav1.NewDeleteOptions(1))
}()
g.By("verifying named metrics keys")
queries := map[string]bool{
fmt.Sprintf(`pod_network_name_info{pod="%s",namespace="%s",interface="eth0"} == 0`, execPod.Name, execPod.Namespace): true,
fmt.Sprintf(`pod_network_name_info{pod="%s",namespace="%s",network_name="%s/secondary"} == 0`, execPod.Name, execPod.Namespace, ns): true,
}
err = helper.RunQueries(context.TODO(), oc.NewPrometheusClient(context.TODO()), queries, oc)
o.Expect(err).NotTo(o.HaveOccurred())
})
})
})
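// all returns only the non-nil errors from the given list.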
func all(errs ...error) []error {
var result []error
for _, err := range errs {
if err != nil {
result = append(result, err)
}
}
return result
}
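// prometheusTargets models the relevant fields of the Prometheus /api/v1/targets response.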
type prometheusTargets struct {
Data struct {
ActiveTargets []struct {
Labels map[string]string
Health string
ScrapeUrl string
}
}
Status string
}
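// Expect returns nil if an active target matches all of the given labels, has the given health,
// and has a scrape URL matching the pattern; otherwise it returns an error.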
func (t *prometheusTargets) Expect(l labels, health, scrapeURLPattern string) error {
for _, target := range t.Data.ActiveTargets {
match := true
for k, v := range l {
if target.Labels[k] != v {
match = false
break
}
}
if !match {
continue
}
if health != target.Health {
continue
}
if !regexp.MustCompile(scrapeURLPattern).MatchString(target.ScrapeUrl) {
continue
}
return nil
}
return fmt.Errorf("no match for %v with health %s and scrape URL %s", l, health, scrapeURLPattern)
}
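// labels is a set of Prometheus label name/value pairs used to select targets and metrics.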
type labels map[string]string
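// With returns a copy of the label set with the given name/value pair added.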
func (l labels) With(name, value string) labels {
n := make(labels)
for k, v := range l {
n[k] = v
}
n[name] = value
return n
}
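// findEnvVar returns the value of the environment variable with the given key, or an empty string if it is not present.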
func findEnvVar(vars []kapi.EnvVar, key string) string {
for _, v := range vars {
if v.Name == key {
return v.Value
}
}
return ""
}
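// findMetricsWithLabels returns the metrics in the family whose labels match all of the given label values.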
func findMetricsWithLabels(f *dto.MetricFamily, labels map[string]string) []*dto.Metric {
var result []*dto.Metric
if f == nil {
return result
}
for _, m := range f.Metric {
matched := map[string]struct{}{}
for _, l := range m.Label {
if expect, ok := labels[l.GetName()]; ok {
if expect != l.GetValue() {
break
}
matched[l.GetName()] = struct{}{}
}
}
if len(matched) != len(labels) {
continue
}
result = append(result, m)
}
return result
}
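// findCountersWithLabels returns the counter values of the metrics in the family that match the given labels.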
func findCountersWithLabels(f *dto.MetricFamily, labels map[string]string) []float64 {
var result []float64
for _, m := range findMetricsWithLabels(f, labels) {
result = append(result, m.Counter.GetValue())
}
return result
}
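// findGaugesWithLabels returns the gauge values of the metrics in the family that match the given labels.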
func findGaugesWithLabels(f *dto.MetricFamily, labels map[string]string) []float64 {
var result []float64
for _, m := range findMetricsWithLabels(f, labels) {
result = append(result, m.Gauge.GetValue())
}
return result
}
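// findMetricLabels returns the values of the label named by match for the metrics that match the given labels.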
func findMetricLabels(f *dto.MetricFamily, labels map[string]string, match string) []string {
var result []string
for _, m := range findMetricsWithLabels(f, labels) {
for _, l := range m.Label {
if l.GetName() == match {
result = append(result, l.GetValue())
break
}
}
}
return result
}
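// expectBearerTokenURLStatusCodeExec curls the URL from the exec pod with the bearer token and
// returns an error if the HTTP status code differs from the expected one.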
func expectBearerTokenURLStatusCodeExec(ns, execPodName, url, bearer string, statusCode int) error {
cmd := fmt.Sprintf("curl -k -s -H 'Authorization: Bearer %s' -o /dev/null -w '%%{http_code}' %q", bearer, url)
output, err := e2e.RunHostCmd(ns, execPodName, cmd)
if err != nil {
return fmt.Errorf("host command failed: %v\n%s", err, output)
}
if output != strconv.Itoa(statusCode) {
return fmt.Errorf("last response from server was not %d: %s", statusCode, output)
}
return nil
}
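// getBearerTokenURLViaPod curls the URL from the exec pod with the bearer token and returns the response body.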
func getBearerTokenURLViaPod(ns, execPodName, url, bearer string) (string, error) {
cmd := fmt.Sprintf("curl -s -k -H 'Authorization: Bearer %s' %q", bearer, url)
output, err := e2e.RunHostCmd(ns, execPodName, cmd)
if err != nil {
return "", fmt.Errorf("host command failed: %v\n%s", err, output)
}
return output, nil
}
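// hasPullSecret reports whether the openshift-config/pull-secret contains an auth entry for the given registry name.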
func hasPullSecret(client clientset.Interface, name string) bool {
scrt, err := client.CoreV1().Secrets("openshift-config").Get(context.Background(), "pull-secret", metav1.GetOptions{})
if err != nil {
if kapierrs.IsNotFound(err) {
return false
}
e2e.Failf("could not retrieve pull-secret: %v", err)
}
if scrt.Type != v1.SecretTypeDockerConfigJson {
e2e.Failf("error expecting secret type %s got %s", v1.SecretTypeDockerConfigJson, scrt.Type)
}
ps := struct {
Auths map[string]struct {
Auth string `json:"auth"`
} `json:"auths"`
}{}
if err := json.Unmarshal(scrt.Data[v1.DockerConfigJsonKey], &ps); err != nil {
e2e.Failf("could not unmarshal pullSecret from openshift-config/pull-secret: %v", err)
}
return len(ps.Auths[name].Auth) > 0
}
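// isTechPreviewCluster reports whether the cluster FeatureGate is set to TechPreviewNoUpgrade.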
func isTechPreviewCluster(oc *exutil.CLI) bool {
featureGate, err := oc.AdminConfigClient().ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{})
if err != nil {
if kapierrs.IsNotFound(err) {
return false
}
e2e.Failf("could not retrieve feature-gate: %v", err)
}
return featureGate.Spec.FeatureSet == configv1.TechPreviewNoUpgrade
}
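// hasTelemeterClient reports whether telemeter-client pods can be listed in the openshift-monitoring namespace.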
func hasTelemeterClient(client clientset.Interface) bool {
_, err := client.CoreV1().Pods("openshift-monitoring").List(context.Background(), metav1.ListOptions{
LabelSelector: "app.kubernetes.io/name=telemeter-client",
})
if err != nil {
if kapierrs.IsNotFound(err) {
return false
}
e2e.Failf("could not list pods: %v", err)
}
return true
}
|
[
"\"TEST_UNSUPPORTED_ALLOW_VERSION_SKEW\"",
"\"TEST_UNSUPPORTED_ALLOW_VERSION_SKEW\""
] |
[] |
[
"TEST_UNSUPPORTED_ALLOW_VERSION_SKEW"
] |
[]
|
["TEST_UNSUPPORTED_ALLOW_VERSION_SKEW"]
|
go
| 1 | 0 | |
source-code/pkg/controller/client/client.go
|
package client
import (
"io/ioutil"
"net"
"os"
clientset "pkg/aadsync/client/clientset/versioned"
aadgroupsyncv1 "pkg/aadsync/apis/aad.microsoft.com/v1"
v1 "pkg/aadsync/client/clientset/versioned/typed/aad.microsoft.com/v1"
logrus "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rest "k8s.io/client-go/rest"
certutil "k8s.io/client-go/util/cert"
)
// Client contains the internal AAD Group Sync Client details
type Client struct {
Log *logrus.Entry
Config *rest.Config
Client v1.AADGroupSyncInterface
Namespace string
}
// NewClient creates a new AAD Group Sync Client with the default in-cluster configuration. You must be running
// in-cluster for this to succeed
func NewClient(namespace string, log *logrus.Entry) *Client {
config, err := rest.InClusterConfig()
if err != nil {
log.Fatal(err)
}
return NewClientForConfigAndNamespace(config, namespace, log)
}
// NewClientForConfigAndNamespace creates a new AAD Group Sync Client with the specified configuration and namespace
func NewClientForConfigAndNamespace(config *rest.Config, namespace string, log *logrus.Entry) *Client {
clientset, err := clientset.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
client := &Client{
Log: log,
Config: config,
Client: clientset.AadV1().AADGroupSyncs(string(namespace)),
}
log.Info("Created aad group sync client")
log.Debugf("Host: %s", client.Config.Host)
return client
}
// NewClientForLocal creates a new AAD Group Sync Client from local copies of in-cluster resources. This is useful
// for testing
func NewClientForLocal(namespace string, log *logrus.Entry) *Client {
// Found incluster at /var/run/secrets/kubernetes.io/serviceaccount/token
tokenFile := os.Getenv("KUBERNETES_SERVICEACCOUNT_TOKENFILE")
// Found incluster at /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
rootCAFile := os.Getenv("KUBERNETES_SERVICEACCOUNT_ROOTCAFILE")
host := os.Getenv("KUBERNETES_SERVICE_HOST")
port := os.Getenv("KUBERNETES_SERVICE_PORT")
token, err := ioutil.ReadFile(tokenFile)
if err != nil {
log.Fatal(err)
}
tlsClientConfig := rest.TLSClientConfig{}
if _, err := certutil.NewPool(rootCAFile); err != nil {
log.Fatalf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
} else {
tlsClientConfig.CAFile = rootCAFile
}
config := &rest.Config{
Host: "https://" + net.JoinHostPort(host, port),
TLSClientConfig: tlsClientConfig,
BearerToken: string(token),
BearerTokenFile: tokenFile,
}
return NewClientForConfigAndNamespace(config, namespace, log)
}
// Get returns an existing aadgroupsyncs.aad.microsoft.com CRD
func (c *Client) Get(aadGroupName string) (*aadgroupsyncv1.AADGroupSync, error) {
c.Log.Infof("Fetching aadgroupsyncs.aad.microsoft.com: %s", aadGroupName)
aadGroup, err := c.Client.Get(aadGroupName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
c.Log.Infof("Not found - aadgroupsyncs.aad.microsoft.com: %s", aadGroupName)
return nil, nil
}
return nil, err
}
return aadGroup, nil
}
// Create creates a new aadgroupsyncs.aad.microsoft.com CRD
func (c *Client) Create(aadGroup *aadgroupsyncv1.AADGroupSync) (*aadgroupsyncv1.AADGroupSync, error) {
c.Log.Infof("Creating aadgroupsyncs.aad.microsoft.com: %s", aadGroup.ObjectMeta.Name)
aadGroup, err := c.Client.Create(aadGroup)
if err != nil {
return nil, err
}
return aadGroup, nil
}
// Update updates an existing aadgroupsyncs.aad.microsoft.com CRD
func (c *Client) Update(aadGroup *aadgroupsyncv1.AADGroupSync) (*aadgroupsyncv1.AADGroupSync, error) {
c.Log.Infof("Updating aadgroupsyncs.aad.microsoft.com: %s", aadGroup.ObjectMeta.Name)
aadGroup, err := c.Client.Update(aadGroup)
if err != nil {
return nil, err
}
return aadGroup, nil
}
// Delete deletes an existing aadgroupsyncs.aad.microsoft.com CRD
func (c *Client) Delete(aadGroupName string) error {
c.Log.Infof("Deleting aadgroupsyncs.aad.microsoft.com: %s", aadGroupName)
deletePolicy := metav1.DeletePropagationForeground
err := c.Client.Delete(aadGroupName, &metav1.DeleteOptions{PropagationPolicy: &deletePolicy})
if err != nil {
return err
}
return nil
}
// List returns a collection of existing aadgroupsyncs.aad.microsoft.com CRDs
func (c *Client) List() ([]aadgroupsyncv1.AADGroupSync, error) {
c.Log.Infof("Fetching all aadgroupsyncs.aad.microsoft.com")
aadGroupList, err := c.Client.List(metav1.ListOptions{})
if err != nil {
return nil, err
}
return aadGroupList.Items, nil
}
|
[
"\"KUBERNETES_SERVICEACCOUNT_TOKENFILE\"",
"\"KUBERNETES_SERVICEACCOUNT_ROOTCAFILE\"",
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT",
"KUBERNETES_SERVICEACCOUNT_TOKENFILE",
"KUBERNETES_SERVICEACCOUNT_ROOTCAFILE"
] |
[]
|
["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBERNETES_SERVICEACCOUNT_TOKENFILE", "KUBERNETES_SERVICEACCOUNT_ROOTCAFILE"]
|
go
| 4 | 0 | |
scripts/launch_workflow_cf/main.py
|
"""Google Cloud Function to launch a Terra workflow."""
import os
from typing import Any, Dict
from utils import prepare_and_launch
def launch_workflow(data: Dict[Any, Any], context: Any):
"""Entry point for execution via a Cloud Function.
This Cloud Function reads configuration from environment variables and the triggering event.
This example workflow uses entities from a data table so that workflow parameter values do
not need to be hardcoded here in this script.
Environment variables:
WORKSPACE_NAMESPACE: The project id of the Terra billing project in which the workspace resides.
WORKSPACE_NAME: The name of the workspace in which the workflow resides
METHOD_NAMESPACE: The namespace of the workflow method.
METHOD_NAME: The name of the workflow method.
SECRET_PATH: The 'Resource ID' of the service account key stored in Secret Manager. Or, if
testing locally, the filepath to the JSON key for the service account.
TRIGGER_PARAMETER_NAME: The name of the workflow parameter to receive the path to the triggering file.
Defaults to `MyWorkflowName.aCloudStorageFilePath`.
ENTITY_SET_NAME: The name of the entity set to be used for all other workflow parameters. Defaults to
the most recently created entity set of the root entity type.
Args:
data: The dictionary with data specific to this type of event.
The `data` field contains a description of the event in
the Cloud Storage `object` format described here:
https://cloud.google.com/storage/docs/json_api/v1/objects#resource
context: Metadata of triggering event.
Returns:
None; the side effect is the execution of a parameter-parallel Terra workflow.
"""
# Extract file information from the triggering Cloud Storage event.
file_name = data.get('name')
bucket_name = data.get('bucket')
file_path = f"gs://{bucket_name}/{file_name}"
print(f"input file: {file_name}; full path: {file_path}")
# Default to the parameter name from the example workflow.
workflow_parameters = {
os.getenv("TRIGGER_PARAMETER_NAME", "MyWorkflowName.aCloudStorageFilePath"): f"\"{file_path}\"",
}
prepare_and_launch(
workspace_namespace=os.getenv("WORKSPACE_NAMESPACE"),
workspace_name=os.getenv("WORKSPACE_NAME"),
method_namespace=os.getenv("METHOD_NAMESPACE"),
method_name=os.getenv("METHOD_NAME"),
secret_path=os.getenv("SECRET_PATH"),
workflow_parameters=workflow_parameters,
# Default to 'None', which will cause the most recently created entity set to be used.
entity_set_name=os.getenv("ENTITY_SET_NAME", None)
)
if __name__ == "__main__":
"""Entry point of manual execution for testing purposes."""
# This example parameter is a world-readable file.
# gs://genomics-public-data/platinum-genomes/other/platinum_genomes_sample_info.csv
launch_workflow(data={"bucket": "genomics-public-data",
"name": "platinum-genomes/other/platinum_genomes_sample_info.csv"},
context=None)
|
[] |
[] |
[
"WORKSPACE_NAME",
"SECRET_PATH",
"WORKSPACE_NAMESPACE",
"METHOD_NAME",
"TRIGGER_PARAMETER_NAME",
"METHOD_NAMESPACE",
"ENTITY_SET_NAME"
] |
[]
|
["WORKSPACE_NAME", "SECRET_PATH", "WORKSPACE_NAMESPACE", "METHOD_NAME", "TRIGGER_PARAMETER_NAME", "METHOD_NAMESPACE", "ENTITY_SET_NAME"]
|
python
| 7 | 0 | |
pkg/adapter/runtime.go
|
// +build !remoteclient
package adapter
import (
"bufio"
"context"
"io"
"io/ioutil"
"os"
"text/template"
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
)
// LocalRuntime describes a typical libpod runtime
type LocalRuntime struct {
*libpod.Runtime
Remote bool
}
// ContainerImage ...
type ContainerImage struct {
*image.Image
}
// Container ...
type Container struct {
*libpod.Container
}
// Pod encapsulates the libpod.Pod structure, helps with remote vs. local
type Pod struct {
*libpod.Pod
}
// Volume ...
type Volume struct {
*libpod.Volume
}
// VolumeFilter is for filtering volumes on the client
type VolumeFilter func(*Volume) bool
// GetRuntimeNoStore returns a LocalRuntime struct with an embedded runtime but
// without a configured storage.
func GetRuntimeNoStore(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime, err := libpodruntime.GetRuntimeNoStore(ctx, c)
if err != nil {
return nil, err
}
return getRuntime(runtime)
}
// GetRuntime returns a LocalRuntime struct with the actual runtime embedded in it
func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime, error) {
runtime, err := libpodruntime.GetRuntime(ctx, c)
if err != nil {
return nil, err
}
return getRuntime(runtime)
}
func getRuntime(runtime *libpod.Runtime) (*LocalRuntime, error) {
return &LocalRuntime{
Runtime: runtime,
}, nil
}
// GetFilteredImages returns a slice of container images that match the given filters
func (r *LocalRuntime) GetFilteredImages(filters []string, rwOnly bool) ([]*ContainerImage, error) {
images, err := r.ImageRuntime().GetImagesWithFilters(filters)
if err != nil {
return nil, err
}
return r.ImagestoContainerImages(images, rwOnly)
}
// GetImages returns a slice of images in containerimages
func (r *LocalRuntime) GetImages() ([]*ContainerImage, error) {
return r.getImages(false)
}
// GetRWImages returns a slice of read/write images in containerimages
func (r *LocalRuntime) GetRWImages() ([]*ContainerImage, error) {
return r.getImages(true)
}
func (r *LocalRuntime) getImages(rwOnly bool) ([]*ContainerImage, error) {
images, err := r.Runtime.ImageRuntime().GetImages()
if err != nil {
return nil, err
}
return r.ImagestoContainerImages(images, rwOnly)
}
func (r *LocalRuntime) ImagestoContainerImages(images []*image.Image, rwOnly bool) ([]*ContainerImage, error) {
var containerImages []*ContainerImage
for _, i := range images {
if rwOnly && i.IsReadOnly() {
continue
}
containerImages = append(containerImages, &ContainerImage{i})
}
return containerImages, nil
}
// NewImageFromLocal returns a containerimage representation of an image from local storage
func (r *LocalRuntime) NewImageFromLocal(name string) (*ContainerImage, error) {
img, err := r.Runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
return nil, err
}
return &ContainerImage{img}, nil
}
// LoadFromArchiveReference calls into local storage to load an image from an archive
func (r *LocalRuntime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*ContainerImage, error) {
var containerImages []*ContainerImage
imgs, err := r.Runtime.ImageRuntime().LoadFromArchiveReference(ctx, srcRef, signaturePolicyPath, writer)
if err != nil {
return nil, err
}
for _, i := range imgs {
ci := ContainerImage{i}
containerImages = append(containerImages, &ci)
}
return containerImages, nil
}
// New calls into local storage to look for an image in local storage or to pull it
func (r *LocalRuntime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *image.DockerRegistryOptions, signingoptions image.SigningOptions, label *string, pullType util.PullType) (*ContainerImage, error) {
img, err := r.Runtime.ImageRuntime().New(ctx, name, signaturePolicyPath, authfile, writer, dockeroptions, signingoptions, label, pullType)
if err != nil {
return nil, err
}
return &ContainerImage{img}, nil
}
// RemoveImage calls into local storage and removes an image
func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, force bool) (string, error) {
return r.Runtime.RemoveImage(ctx, img.Image, force)
}
// PruneImages is a wrapper for PruneImages within the image pkg
func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
return r.ImageRuntime().PruneImages(ctx, all, filter)
}
// Export is a wrapper for exporting a container to a tarfile
func (r *LocalRuntime) Export(name string, path string) error {
ctr, err := r.Runtime.LookupContainer(name)
if err != nil {
return errors.Wrapf(err, "error looking up container %q", name)
}
return ctr.Export(path)
}
// Import is a wrapper to import a container image
func (r *LocalRuntime) Import(ctx context.Context, source, reference string, changes []string, history string, quiet bool) (string, error) {
return r.Runtime.Import(ctx, source, reference, changes, history, quiet)
}
// CreateVolume is a wrapper to create volumes
func (r *LocalRuntime) CreateVolume(ctx context.Context, c *cliconfig.VolumeCreateValues, labels, opts map[string]string) (string, error) {
var (
options []libpod.VolumeCreateOption
volName string
)
if len(c.InputArgs) > 0 {
volName = c.InputArgs[0]
options = append(options, libpod.WithVolumeName(volName))
}
if c.Flag("driver").Changed {
options = append(options, libpod.WithVolumeDriver(c.Driver))
}
if len(labels) != 0 {
options = append(options, libpod.WithVolumeLabels(labels))
}
if len(opts) != 0 {
// We need to process -o for uid, gid
parsedOptions, err := shared.ParseVolumeOptions(opts)
if err != nil {
return "", err
}
options = append(options, parsedOptions...)
}
newVolume, err := r.NewVolume(ctx, options...)
if err != nil {
return "", err
}
return newVolume.Name(), nil
}
// RemoveVolumes is a wrapper to remove volumes
func (r *LocalRuntime) RemoveVolumes(ctx context.Context, c *cliconfig.VolumeRmValues) ([]string, map[string]error, error) {
return shared.SharedRemoveVolumes(ctx, r.Runtime, c.InputArgs, c.All, c.Force)
}
// Push is a wrapper to push an image to a registry
func (r *LocalRuntime) Push(ctx context.Context, srcName, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions image.SigningOptions, dockerRegistryOptions *image.DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error {
newImage, err := r.ImageRuntime().NewFromLocal(srcName)
if err != nil {
return err
}
return newImage.PushImageToHeuristicDestination(ctx, destination, manifestMIMEType, authfile, digestfile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, nil)
}
// InspectVolumes returns a slice of volumes based on an arg list or --all
func (r *LocalRuntime) InspectVolumes(ctx context.Context, c *cliconfig.VolumeInspectValues) ([]*libpod.InspectVolumeData, error) {
var (
volumes []*libpod.Volume
err error
)
if c.All {
volumes, err = r.GetAllVolumes()
} else {
for _, v := range c.InputArgs {
vol, err := r.LookupVolume(v)
if err != nil {
return nil, err
}
volumes = append(volumes, vol)
}
}
if err != nil {
return nil, err
}
inspectVols := make([]*libpod.InspectVolumeData, 0, len(volumes))
for _, vol := range volumes {
inspectOut, err := vol.Inspect()
if err != nil {
return nil, errors.Wrapf(err, "error inspecting volume %s", vol.Name())
}
inspectVols = append(inspectVols, inspectOut)
}
return inspectVols, nil
}
// Volumes returns a slice of localruntime volumes
func (r *LocalRuntime) Volumes(ctx context.Context) ([]*Volume, error) {
vols, err := r.GetAllVolumes()
if err != nil {
return nil, err
}
return libpodVolumeToVolume(vols), nil
}
// libpodVolumeToVolume converts a slice of libpod volumes to a slice
// of localruntime volumes (same as libpod)
func libpodVolumeToVolume(volumes []*libpod.Volume) []*Volume {
var vols []*Volume
for _, v := range volumes {
newVol := Volume{
v,
}
vols = append(vols, &newVol)
}
return vols
}
// Build is the wrapper to build images
func (r *LocalRuntime) Build(ctx context.Context, c *cliconfig.BuildValues, options imagebuildah.BuildOptions, dockerfiles []string) error {
namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c.PodmanCommand.Command)
if err != nil {
return errors.Wrapf(err, "error parsing namespace-related options")
}
usernsOption, idmappingOptions, err := parse.IDMappingOptions(c.PodmanCommand.Command, options.Isolation)
if err != nil {
return errors.Wrapf(err, "error parsing ID mapping options")
}
namespaceOptions.AddOrReplace(usernsOption...)
systemContext, err := parse.SystemContextFromOptions(c.PodmanCommand.Command)
if err != nil {
return errors.Wrapf(err, "error building system context")
}
authfile := c.Authfile
if len(c.Authfile) == 0 {
authfile = os.Getenv("REGISTRY_AUTH_FILE")
}
systemContext.AuthFilePath = authfile
commonOpts, err := parse.CommonBuildOptions(c.PodmanCommand.Command)
if err != nil {
return err
}
options.NamespaceOptions = namespaceOptions
options.ConfigureNetwork = networkPolicy
options.IDMappingOptions = idmappingOptions
options.CommonBuildOpts = commonOpts
options.SystemContext = systemContext
if c.GlobalFlags.Runtime != "" {
options.Runtime = c.GlobalFlags.Runtime
} else {
options.Runtime = r.GetOCIRuntimePath()
}
if c.Quiet {
options.ReportWriter = ioutil.Discard
}
if rootless.IsRootless() {
options.Isolation = buildah.IsolationOCIRootless
}
return r.Runtime.Build(ctx, options, dockerfiles...)
}
// PruneVolumes is a wrapper function for libpod PruneVolumes
func (r *LocalRuntime) PruneVolumes(ctx context.Context) ([]string, []error) {
return r.Runtime.PruneVolumes(ctx)
}
// SaveImage is a wrapper function for saving an image to the local filesystem
func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) error {
source := c.InputArgs[0]
additionalTags := c.InputArgs[1:]
newImage, err := r.Runtime.ImageRuntime().NewFromLocal(source)
if err != nil {
return err
}
return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress)
}
// LoadImage is a wrapper function for libpod LoadImage
func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
var (
writer io.Writer
)
if !cli.Quiet {
writer = os.Stderr
}
return r.Runtime.LoadImage(ctx, name, cli.Input, writer, cli.SignaturePolicy)
}
// IsImageNotFound checks if the error indicates that no image was found.
func IsImageNotFound(err error) bool {
return errors.Cause(err) == image.ErrNoSuchImage
}
// HealthCheck is a wrapper for the same-named function in libpod
func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
output := "unhealthy"
status, err := r.Runtime.HealthCheck(c.InputArgs[0])
if status == libpod.HealthCheckSuccess {
output = "healthy"
}
return output, err
}
// Events is a wrapper to libpod to obtain libpod/podman events
func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
var (
fromStart bool
eventsError error
)
var tmpl *template.Template
if c.Format != formats.JSONString {
template, err := template.New("events").Parse(c.Format)
if err != nil {
return err
}
tmpl = template
}
if len(c.Since) > 0 || len(c.Until) > 0 {
fromStart = true
}
eventChannel := make(chan *events.Event)
go func() {
readOpts := events.ReadOptions{FromStart: fromStart, Stream: c.Stream, Filters: c.Filter, EventChannel: eventChannel, Since: c.Since, Until: c.Until}
eventsError = r.Runtime.Events(readOpts)
}()
if eventsError != nil {
return eventsError
}
w := bufio.NewWriter(os.Stdout)
for event := range eventChannel {
if c.Format == formats.JSONString {
jsonStr, err := event.ToJSONString()
if err != nil {
return errors.Wrapf(err, "unable to format json")
}
if _, err := w.Write([]byte(jsonStr)); err != nil {
return err
}
} else if len(c.Format) > 0 {
if err := tmpl.Execute(w, event); err != nil {
return err
}
} else {
if _, err := w.Write([]byte(event.ToHumanReadable())); err != nil {
return err
}
}
if _, err := w.Write([]byte("\n")); err != nil {
return err
}
if err := w.Flush(); err != nil {
return err
}
}
return nil
}
// Diff shows the difference in two objects
func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) {
return r.Runtime.GetDiff("", to)
}
// GenerateKube creates Kubernetes YAML from containers and pods
func (r *LocalRuntime) GenerateKube(c *cliconfig.GenerateKubeValues) (*v1.Pod, *v1.Service, error) {
return shared.GenerateKube(c.InputArgs[0], c.Service, r.Runtime)
}
// GetPodsByStatus returns a slice of pods filtered by a libpod status
func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error) {
filterFunc := func(p *libpod.Pod) bool {
state, _ := shared.GetPodStatus(p)
for _, status := range statuses {
if state == status {
return true
}
}
return false
}
pods, err := r.Runtime.Pods(filterFunc)
if err != nil {
return nil, err
}
return pods, nil
}
// GetVersion is an alias to satisfy interface{}
func (r *LocalRuntime) GetVersion() (define.Version, error) {
return define.GetVersion()
}
// RemoteEndpoint resolves the interface requirement
func (r *LocalRuntime) RemoteEndpoint() (*Endpoint, error) {
return nil, errors.New("RemoteEndpoint() not implemented for local connection")
}
|
[
"\"REGISTRY_AUTH_FILE\""
] |
[] |
[
"REGISTRY_AUTH_FILE"
] |
[]
|
["REGISTRY_AUTH_FILE"]
|
go
| 1 | 0 | |
setup.py
|
# Copyright (c) 2020-2022 Qianqian Fang <q.fang at neu.edu>. All rights reserved.
# Copyright (c) 2016-2019 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/NeuroJSON/pybj/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
import warnings
from glob import glob
from platform import python_implementation
# Allow for environments without setuptools
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup # pylint: disable=ungrouped-imports
from distutils.core import Extension
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError
from distutils.errors import DistutilsPlatformError, DistutilsExecError
def load_description(filename):
script_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(script_dir, filename), 'r') as infile:
return infile.read()
# Loosely based on https://github.com/mongodb/mongo-python-driver/blob/master/setup.py
class BuildExtWarnOnFail(build_ext):
"""Allow for extension building to fail."""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
ex = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(ex))
warnings.warn("Extension modules: There was an issue with your platform configuration - see above.")
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError):
ex = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(ex))
warnings.warn("Extension module %s: The output above this warning shows how the compilation failed."
% ext.name)
BUILD_EXTENSIONS = 'PYBJDATA_NO_EXTENSION' not in os.environ and python_implementation() != 'PyPy'
COMPILE_ARGS = ['-std=c99', '-DUSE__BJDATA']
# For testing/debug only - some of these are GCC-specific
# COMPILE_ARGS += ['-Wall', '-Wextra', '-Wundef', '-Wshadow', '-Wcast-align', '-Wcast-qual', '-Wstrict-prototypes',
# '-pedantic']
setup(
name='bjdata',
version='0.3.2',
description='Binary JData and UBJSON encoder/decoder',
long_description=load_description('README.md'),
long_description_content_type='text/markdown',
author='Qianqian Fang',
author_email='[email protected]',
maintainer='Qianqian Fang',
maintainer_email='[email protected]',
url='https://github.com/NeuroJSON/pybj',
license='Apache License 2.0',
packages=['bjdata'],
install_requires=[
'numpy>=1.8.0'
],
extras_require={
'dev': [
'Pympler>=0.7 ,<0.8',
'coverage>=4.5.3,<4.6'
]
},
zip_safe=False,
ext_modules=([Extension(
'_bjdata',
sorted(glob('src/*.c')),
extra_compile_args=COMPILE_ARGS,
# undef_macros=['NDEBUG']
)] if BUILD_EXTENSIONS else []),
cmdclass={"build_ext": BuildExtWarnOnFail},
keywords = ['JSON', 'JData', 'UBJSON', 'BJData', 'OpenJData', 'NeuroJSON', 'JNIfTI', 'Encoder', 'Decoder'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Multi_Classification/Multi_Image_Classification.py
|
# Primary Python Files for Image Classification
import numpy as np
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # dont show any tensorflow warning messages
import cv2
# Keras libraries used for making the model and tensorflow
import tensorflow, keras
from tensorflow.keras.utils import to_categorical
from keras.layers import Dense,Conv2D,Flatten,MaxPool2D,Dropout
from keras.models import Sequential
# Sklearn library for splitting the data precisely
from sklearn.model_selection import train_test_split
'''
Multi_Image_Classification Class
Description:
1. Identify different sets of images based on the labels you provide.
2. Works based off a sequential model.
3. Uses a Convolutional Neural Network.
'''
class Multi_Image_Classification:
# ------------------------------ Generic Fields Needed for Training ---------------------------------- #
shape = (200,200) # predefine an established shape for training and resizing the images (default)
labels = [] # define the labels to train on
# --------------------------- Training Tools ---------------------------------- #
train_path = './Multi_Classification/train' # define the path where the training images are located
train_labels = None # define the labels (same as testing)
train_images = None # define the images with the training
x_train = None # split the training images for training
y_train = None # split the training labels for training
# ------------------------- Testing Tools -------------------------------------- #
test_path = './Multi_Classification/test' # define the path where the testing images are located
x_val = None # split the training images for testing
y_val = None # split the training labels for testing
test_labels = None # define the testing labels (same as training)
test_images = None # define the testing images
# ----------------------------------- Main Model Tools ------------------------------- #
epoch = 50 # default epoch
batch_size = 10 # default batch size
model = None # define the model (Sequential for Image Classification)
# ------------------------- Define the Functions for Making the model ---------------------- #
# define the labels and images depending on the directory path
def set_data(self, directory_path):
data_labels = [] # define the set of labels according to the name of the file
data_images = [] # define the images
# iterate through all the images in the directory
for filename in os.listdir(directory_path):
# Get the values of the images at the directory path
img = cv2.imread(os.path.join(directory_path, filename))
# Splitting file names and storing the labels for each image in a list
data_labels.append(filename.split('_')[0])
# Resize all images to a specific shape
img = cv2.resize(img, self.shape)
data_images.append(img) # append the image
data_labels = pd.get_dummies(data_labels).values # Get the categorical data
data_images = np.array(data_images) # Define the image array as a np array for fitting
return data_labels, data_images # return the labels, images for the specific directory
# define the tools to be utilized on creation of the object
def __init__(self, create_model, labels, shape, epoch, batch_size):
np.random.seed(1) # sets the random seed of the NumPy pseudo-random number generator
self.shape = shape # let the user enter the shape of the images to be formed (default 200x200)
# let the user define the labels for their model they want to create
self.labels = labels # default values
# define the training images and labels
self.train_labels, self.train_images = self.set_data(self.train_path)
# Splitting Training data into train and validation dataset
self.x_train,self.x_val,self.y_train,self.y_val = train_test_split(self.train_images,self.train_labels,random_state=1)
# define the test labels and images
self.test_labels, self.test_images = self.set_data(self.test_path)
# define the model for predicition
if create_model == True:
self.model = self.create_model(epoch, batch_size, self.x_train, self.y_train, self.x_val, self.y_val)
# create the model to be used for predicition
def create_model(self, epoch, batch_size, x_train, y_train, x_val, y_val):
model = Sequential() # define the model as sequential
model.add(Conv2D(kernel_size=(3,3), filters=32, activation='tanh', input_shape=(self.shape[0], self.shape[1], 3))) # define the first layer; input shape follows self.shape
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the second layer
model.add(MaxPool2D(2,2)) # define the third layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the fourth layer
model.add(MaxPool2D(2,2)) # define the fifth layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the sixth layer
model.add(Flatten()) # define the seventh layer
model.add(Dense(20,activation='relu')) # define the eighth layer
model.add(Dense(15,activation='relu')) # define the ninth layer
model.add(Dense(len(self.labels),activation = 'softmax')) # define the tenth layer (according to the number of labels for the model)
model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam') # compile the models with categorical because we are working with multiple labels
history = model.fit(x_train,y_train,epochs=epoch,batch_size=batch_size,validation_data=(x_val,y_val)) # train the model
# after the training is done, define a dictionary that holds the model and history from the training
complete_model = {} # define the dictionary
complete_model['model'] = model # define the model with its key
complete_model['history'] = history # define the history with its key
complete_model['labels'] = self.labels # save the labels into the dictionary
return complete_model # return the model at the end
# function to save the model that was created in the create_model function
def save_model(self, model_name, model):
model.save('./Models/{}.h5'.format(model_name)) # save the model in the models directory
# function to save the model's labels to be used later
def save_labels(self, labels, model_name):
f = open('./Models/{}_Labels.txt'.format(model_name), 'a') # create the .txt file that will contain the labels of the model
# iterate through the labels when the model was first created
for i in range(len(labels)):
f.write("{}\n".format(labels[i])) # write the labels to the file
f.close() # after iterating through all the labels, close the file so the space can be free
# ------------------------------------------------------ Define the functions used for classifiying --------------------------------------------- #
# classifies images based on the model and the selected image
def classify_image(self, image, model):
checkImage = image[0] # get the image
checklabel = image[0] # get the label of the image
predict = model.predict(np.array(checkImage)) # get the predicition
predicted_label = self.labels[np.argmax(predict)] # get the predicted label
return predicted_label # return the predicted label from the labels provided by the user
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
src/main/java/com/oracle/coherence/demo/application/Utilities.java
|
/*
* File: Utilities.java
*
* Copyright (c) 2015, 2016 Oracle and/or its affiliates.
*
* You may not use this file except in compliance with the Universal Permissive
* License (UPL), Version 1.0 (the "License.")
*
* You may obtain a copy of the License at https://opensource.org/licenses/UPL.
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and limitations
* under the License.
*/
package com.oracle.coherence.demo.application;
import com.oracle.coherence.demo.model.Price;
import com.oracle.coherence.demo.model.Trade;
import com.tangosol.net.CacheFactory;
import com.tangosol.net.NamedCache;
import com.tangosol.net.cache.TypeAssertion;
import com.tangosol.util.InvocableMap;
import com.tangosol.util.UUID;
import java.util.HashMap;
import java.util.Random;
/**
* Utility functions for the Coherence Demo.
*
* @author Brian Oliver
*/
public class Utilities
{
private static final int NR_POSITIONS_TO_CREATE = 100000;
private static final float MIN_FACTOR = 0.95f;
private static final float MAX_FACTOR = 1.06f;
private static final double INITIAL_PRICE = 20;
private static final double MIN_PRICE = 5;
private static final String[] SYMBOLS = {"ORCL", "MSFT", "GOOG", "AAPL", "NFLX", "DELL"};
private static final Random RANDOM = new Random();
/**
* The path to the VisualVM executable, for JDK9+
*/
public static final String VISUALVM = System.getProperty("visualvm.executable", "");
/**
* The {@link TypeAssertion} for the trades cache.
*/
public static final TypeAssertion TRADE_CACHE_TYPE = TypeAssertion.withTypes(UUID.class, Trade.class);
/**
* The {@link TypeAssertion} for the prices cache.
*/
public static final TypeAssertion PRICE_CACHE_TYPE = TypeAssertion.withTypes(String.class, Price.class);
/**
* The name of the trades cache.
*/
public static final String TRADE_CACHE = "trades";
/**
* The name of the prices cache.
*/
public static final String PRICE_CACHE = "prices";
/**
* Create the required positions.
*
* @param args arguments to main
*/
@SuppressWarnings("unchecked")
public static void main(String[] args)
{
createPositions(NR_POSITIONS_TO_CREATE);
}
/**
* Obtain the trades cache.
*
* @return the trade {@link NamedCache}
*/
public static NamedCache<UUID, Trade> getTradesCache()
{
return CacheFactory.getTypedCache(TRADE_CACHE, TRADE_CACHE_TYPE);
}
/**
* Obtain the price cache
*
* @return the price {@link NamedCache}
*/
public static NamedCache<String, Price> getPricesCache()
{
return CacheFactory.getTypedCache(PRICE_CACHE, PRICE_CACHE_TYPE);
}
/**
* Obtain an indicator showing if we are running under the Coherence Operator in
* Kubernetes.
*
* @return an indicator showing if we are running under the Coherence Operator in
* Kubernetes
*/
public static boolean isRunningInKubernetes()
{
return System.getenv("COHERENCE_OPERATOR_SERVICE_SERVICE_HOST") != null &&
System.getenv("COHERENCE_OPERATOR_SERVICE_SERVICE_PORT") != null;
}
/**
* Obtain the Coherence cluster version.
*
* @return the Coherence cluster version
*/
public static String getCoherenceVersion()
{
return CacheFactory.VERSION.replaceFirst(" .*$", "")
.replaceFirst("[\\.-]SNAPSHOT.*$","")
.replaceAll("-",".");
}
/**
* Obtain an indicator showing if federation is configured in K8s.
*
* @return an indicator showing if federation is configured in K8s.
*/
public static boolean isFederationConfiguredInK8s()
{
return isRunningInKubernetes() &&
System.getProperty("primary.cluster") != null &&
System.getProperty("secondary.cluster") != null &&
System.getProperty("primary.cluster.host") != null &&
System.getProperty("secondary.cluster.host") != null;
}
/**
* Obtain the Coherence cluster version as an integer.
*
* @return the Coherence cluster version as an integer
*/
public static int getCoherenceVersionAsInt()
{
return Integer.parseInt(getCoherenceVersion().replaceAll("\\.", ""));
}
/**
* Add indexes to the caches to improve query performance.
*/
public static void addIndexes()
{
NamedCache<UUID, Trade> tradesCache = getTradesCache();
System.out.print("Adding Indexes...");
tradesCache.addIndex(Trade::getSymbol, true, null);
tradesCache.addIndex(Trade::getPurchaseValue, false, null);
tradesCache.addIndex(Trade::getAmount, false, null);
System.out.println(" Done");
}
/**
* Remove indexes to the caches.
*/
public static void removeIndexes()
{
NamedCache<UUID, Trade> tradesCache = getTradesCache();
System.out.print("Removing Indexes...");
tradesCache.removeIndex(Trade::getSymbol);
tradesCache.removeIndex(Trade::getPurchaseValue);
tradesCache.removeIndex(Trade::getAmount);
System.out.println(" Done");
}
/**
* Populate initial prices for symbols. Make the current price for all
* symbols equal to INITIAL_PRICE to make it fair and unbiased.
*/
public static void populatePrices()
{
NamedCache<String, Price> pricesCaches = getPricesCache();
for (int i = 0; i < SYMBOLS.length; i++)
{
Price price = new Price(SYMBOLS[i], INITIAL_PRICE);
pricesCaches.put(price.getSymbol(), price);
}
}
/**
* Create NR_POSITIONS_TO_CREATE in the cache.
*/
public static void createPositions()
{
createPositions(NR_POSITIONS_TO_CREATE);
}
/**
* Create "count" positions in the cache at the current price.
*
* @param count the number of entries to add
*/
public static void createPositions(int count)
{
System.out.printf("Creating %d Positions...\n", count);
NamedCache<UUID, Trade> tradesCache = getTradesCache();
NamedCache<String, Price> priceCache = getPricesCache();
if (priceCache.size() != SYMBOLS.length)
{
populatePrices();
}
HashMap<UUID, Trade> trades = new HashMap<>();
for (int i = 0; i < count; i++)
{
// create a random position
String symbol = SYMBOLS[RANDOM.nextInt(SYMBOLS.length)];
int amount = RANDOM.nextInt(1000) + 1;
double price = priceCache.get(symbol).getPrice();
Trade trade = new Trade(symbol, amount, price);
trades.put(trade.getId(), trade);
// batch the putAll's at 10000
if (i % 10000 == 0)
{
System.out.println("Flushing 10000 trades from HashMap to Coherence cache...");
tradesCache.putAll(trades);
trades.clear();
}
}
if (!trades.isEmpty())
{
tradesCache.putAll(trades);
}
System.out.printf("Creation Complete! (Cache contains %d positions)\n", tradesCache.size());
}
/**
* Update a single random stock symbol price on each call.
*/
public static void updatePrices()
{
NamedCache<String, Price> priceCache = getPricesCache();
// choose random symbol to modify
String symbol = SYMBOLS[RANDOM.nextInt(SYMBOLS.length)];
// invoke using static method to ensure all arguments are captured
priceCache.invoke(symbol, updateStockPrice(RANDOM.nextFloat()));
}
/**
* An entry processor to update the price of a symbol.
*
* @param randomValue a random float to generate the price
*
* @return a {@link InvocableMap.EntryProcessor} to carry out the processing
*/
protected static InvocableMap.EntryProcessor<String, Price, Void> updateStockPrice(float randomValue)
{
return entry -> {
if (entry.isPresent())
{
Price price = entry.getValue();
float factor = (randomValue * (MAX_FACTOR - MIN_FACTOR) + MIN_FACTOR);
double newPrice = price.getPrice() * factor;
// when setting the price, if the value < MIN_PRICE, then make it MIN_PRICE
price.setPrice(newPrice <= MIN_PRICE ? MIN_PRICE : newPrice);
entry.setValue(price);
}
return null;
};
}
}
|
[
"\"COHERENCE_OPERATOR_SERVICE_SERVICE_HOST\"",
"\"COHERENCE_OPERATOR_SERVICE_SERVICE_PORT\""
] |
[] |
[
"COHERENCE_OPERATOR_SERVICE_SERVICE_HOST",
"COHERENCE_OPERATOR_SERVICE_SERVICE_PORT"
] |
[]
|
["COHERENCE_OPERATOR_SERVICE_SERVICE_HOST", "COHERENCE_OPERATOR_SERVICE_SERVICE_PORT"]
|
java
| 2 | 0 | |
utils/osutils.go
|
// Copyright 2018 NetApp, Inc. All Rights Reserved.
package utils
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/cenkalti/backoff"
log "github.com/sirupsen/logrus"
)
const iSCSIErrNoObjsFound = 21
const iSCSIDeviceDiscoveryTimeoutSecs = 90
const multipathDeviceDiscoveryTimeoutSecs = 90
var xtermControlRegex = regexp.MustCompile(`\x1B\[[0-9;]*[a-zA-Z]`)
var pidRunningRegex = regexp.MustCompile(`pid \d+ running`)
var pidRegex = regexp.MustCompile(`^\d+$`)
var chrootPathPrefix string
func init() {
if os.Getenv("DOCKER_PLUGIN_MODE") != "" {
chrootPathPrefix = "/host"
} else {
chrootPathPrefix = ""
}
}
// Attach the volume to the local host. This method must be able to accomplish its task using only the data passed in.
// It may be assumed that this method always runs on the host to which the volume will be attached.
func AttachNFSVolume(name, mountpoint string, publishInfo *VolumePublishInfo) error {
log.Debug(">>>> osutils.AttachNFSVolume")
defer log.Debug("<<<< osutils.AttachNFSVolume")
var exportPath = fmt.Sprintf("%s:%s", publishInfo.NfsServerIP, publishInfo.NfsPath)
var options = publishInfo.MountOptions
log.WithFields(log.Fields{
"volume": name,
"exportPath": exportPath,
"mountpoint": mountpoint,
"options": options,
}).Debug("Publishing NFS volume.")
return mountNFSPath(exportPath, mountpoint, options)
}
// Attach the volume to the local host. This method must be able to accomplish its task using only the data passed in.
// It may be assumed that this method always runs on the host to which the volume will be attached. If the mountpoint
// parameter is specified, the volume will be mounted. The device path is set on the in-out publishInfo parameter
// so that it may be mounted later instead.
func AttachISCSIVolume(name, mountpoint string, publishInfo *VolumePublishInfo) error {
log.Debug(">>>> osutils.AttachISCSIVolume")
defer log.Debug("<<<< osutils.AttachISCSIVolume")
var err error
var lunID = int(publishInfo.IscsiLunNumber)
var targetPortal = publishInfo.IscsiTargetPortal
var targetPortalIP = strings.Split(targetPortal, ":")[0]
var targetIQN = publishInfo.IscsiTargetIQN
var username = publishInfo.IscsiUsername
var initiatorSecret = publishInfo.IscsiInitiatorSecret
var iscsiInterface = publishInfo.IscsiInterface
var fstype = publishInfo.FilesystemType
var options = publishInfo.MountOptions
log.WithFields(log.Fields{
"volume": name,
"mountpoint": mountpoint,
"lunID": lunID,
"targetPortal": targetPortal,
"targetIQN": targetIQN,
"fstype": fstype,
}).Debug("Publishing iSCSI volume.")
if !ISCSISupported() {
err := errors.New("unable to attach: open-iscsi tools not found on host")
log.Errorf("Unable to attach volume: open-iscsi tools not found")
return err
}
// If not logged in, login first
sessionExists, err := iSCSISessionExistsToTargetIQN(targetIQN)
if err != nil {
return err
}
if !sessionExists {
if publishInfo.UseCHAP {
err = loginWithChap(targetIQN, targetPortal, username, initiatorSecret, iscsiInterface, false)
if err != nil {
log.Errorf("Failed to login with CHAP credentials: %+v ", err)
return fmt.Errorf("iSCSI login error: %v", err)
}
} else {
err = EnsureISCSISession(targetPortalIP)
if err != nil {
return fmt.Errorf("iSCSI session error: %v", err)
}
}
}
// If LUN isn't present, rescan the target and wait for the device(s) to appear
if !isAlreadyAttached(lunID, targetIQN) {
err = rescanTargetAndWaitForDevice(lunID, targetIQN)
if err != nil {
log.Errorf("Could not find iSCSI device: %+v", err)
return err
}
}
err = waitForMultipathDeviceForLUN(lunID, targetIQN)
if err != nil {
return err
}
// Lookup all the SCSI device information
deviceInfo, err := getDeviceInfoForLUN(lunID, targetIQN)
if err != nil {
return fmt.Errorf("error getting iSCSI device information: %v", err)
} else if deviceInfo == nil {
return fmt.Errorf("could not get iSCSI device information for LUN %d", lunID)
}
log.WithFields(log.Fields{
"scsiLun": deviceInfo.LUN,
"multipathDevice": deviceInfo.MultipathDevice,
"devices": deviceInfo.Devices,
"fsType": deviceInfo.Filesystem,
"iqn": deviceInfo.IQN,
}).Debug("Found device.")
// Make sure we use the proper device (multipath if in use)
deviceToUse := deviceInfo.Devices[0]
if deviceInfo.MultipathDevice != "" {
deviceToUse = deviceInfo.MultipathDevice
}
if deviceToUse == "" {
return fmt.Errorf("could not determine device to use for %v", name)
}
devicePath := "/dev/" + deviceToUse
// Put a filesystem on the device if there isn't one already there
existingFstype := deviceInfo.Filesystem
if existingFstype == "" {
log.WithFields(log.Fields{"volume": name, "fstype": fstype}).Debug("Formatting LUN.")
err := formatVolume(devicePath, fstype)
if err != nil {
return fmt.Errorf("error formatting LUN %s, device %s: %v", name, deviceToUse, err)
}
} else if existingFstype != fstype {
log.WithFields(log.Fields{
"volume": name,
"existingFstype": existingFstype,
"requestedFstype": fstype,
}).Error("LUN already formatted with a different file system type.")
return fmt.Errorf("LUN %s, device %s already formatted with other filesystem: %s",
name, deviceToUse, existingFstype)
} else {
log.WithFields(log.Fields{
"volume": name,
"fstype": deviceInfo.Filesystem,
}).Debug("LUN already formatted.")
}
// Optionally mount the device
if mountpoint != "" {
if err := MountDevice(devicePath, mountpoint, options); err != nil {
return fmt.Errorf("error mounting LUN %v, device %v, mountpoint %v: %v",
name, deviceToUse, mountpoint, err)
}
}
// Return the device in the publish info in case the mount will be done later
publishInfo.DevicePath = devicePath
return nil
}
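// Illustrative usage sketch (not part of this file's API, field values are placeholders):
// a caller that has a populated VolumePublishInfo for a LUN might attach and mount it
// roughly like this.
//
//   publishInfo := &VolumePublishInfo{
//       IscsiTargetPortal: "10.0.207.7:3260",
//       IscsiTargetIQN:    "iqn.1992-08.com.netapp:sn.example:vs.3",
//       IscsiLunNumber:    0,
//       FilesystemType:    "ext4",
//       MountOptions:      "-o discard",
//   }
//   if err := AttachISCSIVolume("myvol", "/mnt/myvol", publishInfo); err != nil {
//       log.Error(err)
//   }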
// DFInfo is a data structure wrapping the parsed output of the 'df' command
type DFInfo struct {
Target string
Source string
}
// GetDFOutput returns parsed DF output
func GetDFOutput() ([]DFInfo, error) {
log.Debug(">>>> osutils.GetDFOutput")
defer log.Debug("<<<< osutils.GetDFOutput")
var result []DFInfo
out, err := execCommand("df", "--output=target,source")
if err != nil {
// df can fail for reasons we can safely ignore (e.g. a stale file
// handle). Treat the error as a warning if df still printed output;
// only fail if there was no output at all.
if len(out) == 0 {
log.Error("Error encountered gathering df output.")
return nil, err
}
}
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
for _, l := range lines {
a := strings.Fields(l)
if len(a) > 1 {
result = append(result, DFInfo{
Target: a[0],
Source: a[1],
})
}
}
if len(result) > 1 {
return result[1:], nil
}
return result, nil
}
// GetInitiatorIqns returns parsed contents of /etc/iscsi/initiatorname.iscsi
func GetInitiatorIqns() ([]string, error) {
log.Debug(">>>> osutils.GetInitiatorIqns")
defer log.Debug("<<<< osutils.GetInitiatorIqns")
var iqns []string
out, err := execCommand("cat", "/etc/iscsi/initiatorname.iscsi")
if err != nil {
log.Error("Error gathering initiator names.")
return nil, err
}
lines := strings.Split(string(out), "\n")
for _, l := range lines {
if strings.Contains(l, "InitiatorName=") {
iqns = append(iqns, strings.Split(l, "=")[1])
}
}
return iqns, nil
}
// PathExists returns true if the file/directory at the specified path exists,
// false otherwise or if an error occurs.
func PathExists(path string) bool {
if _, err := os.Stat(path); err == nil {
return true
}
return false
}
// getSysfsBlockDirsForLUN returns the list of directories in sysfs where the block devices should appear
// after the scan is successful. One directory is returned for each path in the host session map.
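// For example, host 3 / session 1 / LUN 0 yields:
//   /sys/class/scsi_host/host3/device/session1/iscsi_session/session1/device/target3:0:0/3:0:0:0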
func getSysfsBlockDirsForLUN(lunID int, hostSessionMap map[int]int) []string {
paths := make([]string, 0)
for hostNumber, sessionNumber := range hostSessionMap {
path := fmt.Sprintf("/sys/class/scsi_host/host%d/device/session%d/iscsi_session/session%d/device/target%d:0:0/%d:0:0:%d",
hostNumber, sessionNumber, sessionNumber, hostNumber, hostNumber, lunID)
paths = append(paths, path)
}
return paths
}
// getDevicesForLUN finds the /dev/sd* device names for an iSCSI LUN.
func getDevicesForLUN(paths []string) ([]string, error) {
devices := make([]string, 0)
for _, path := range paths {
dirname := path + "/block"
if !PathExists(dirname) {
continue
}
dirFd, err := os.Open(dirname)
if err != nil {
return nil, err
}
list, err := dirFd.Readdir(1)
dirFd.Close()
if err != nil {
return nil, err
}
if 0 == len(list) {
continue
}
devices = append(devices, list[0].Name())
}
return devices, nil
}
// rescanTargetAndWaitForDevice rescans all paths to a specific LUN and waits until all
// SCSI disk-by-path devices for that LUN are present on the host.
func rescanTargetAndWaitForDevice(lunID int, iSCSINodeName string) error {
fields := log.Fields{
"lunID": lunID,
"iSCSINodeName": iSCSINodeName,
}
log.WithFields(fields).Debug(">>>> osutils.rescanTargetAndWaitForDevice")
defer log.WithFields(fields).Debug("<<<< osutils.rescanTargetAndWaitForDevice")
hostSessionMap := getISCSIHostSessionMapForTarget(iSCSINodeName)
if len(hostSessionMap) == 0 {
return fmt.Errorf("no iSCSI hosts found for target %s", iSCSINodeName)
}
log.WithField("hostSessionMap", hostSessionMap).Debug("Built iSCSI host/session map.")
hosts := make([]int, 0)
for hostNumber := range hostSessionMap {
hosts = append(hosts, hostNumber)
}
if err := iSCSIRescanTargetLUN(lunID, hosts); err != nil {
log.WithField("rescanError", err).Error("Could not rescan for new LUN.")
}
paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap)
log.Debugf("Scanning paths: %v", paths)
found := make([]string, 0)
checkAllDevicesExist := func() error {
found = make([]string, 0)
// Check that every path is present; fail as soon as one is missing
for _, path := range paths {
dirname := path + "/block"
if !PathExists(dirname) {
return errors.New("device not present yet")
}
found = append(found, dirname)
}
return nil
}
devicesNotify := func(err error, duration time.Duration) {
log.WithField("increment", duration).Debug("All devices not yet present, waiting.")
}
deviceBackoff := backoff.NewExponentialBackOff()
deviceBackoff.InitialInterval = 1 * time.Second
deviceBackoff.Multiplier = 1.414 // approx sqrt(2)
deviceBackoff.RandomizationFactor = 0.1
deviceBackoff.MaxElapsedTime = 5 * time.Second
if err := backoff.RetryNotify(checkAllDevicesExist, deviceBackoff, devicesNotify); err == nil {
log.Debugf("Paths found: %v", found)
return nil
}
log.Debugf("Paths found so far: %v", found)
checkAnyDeviceExists := func() error {
found = make([]string, 0)
// Check if any path is present, and return nil (success) if so
for _, path := range paths {
dirname := path + "/block"
if PathExists(dirname) {
found = append(found, dirname)
}
}
if 0 == len(found) {
return errors.New("no devices present yet")
}
return nil
}
devicesNotify = func(err error, duration time.Duration) {
log.WithField("increment", duration).Debug("No devices present yet, waiting.")
}
deviceBackoff = backoff.NewExponentialBackOff()
deviceBackoff.InitialInterval = 1 * time.Second
deviceBackoff.Multiplier = 1.414 // approx sqrt(2)
deviceBackoff.RandomizationFactor = 0.1
deviceBackoff.MaxElapsedTime = (iSCSIDeviceDiscoveryTimeoutSecs - 5) * time.Second
// Run the check/rescan using an exponential backoff
if err := backoff.RetryNotify(checkAnyDeviceExists, deviceBackoff, devicesNotify); err != nil {
log.Warnf("Could not find all devices after %d seconds.", iSCSIDeviceDiscoveryTimeoutSecs)
// In the case of a failure, log info about what devices are present
execCommand("ls", "-al", "/dev")
execCommand("ls", "-al", "/dev/mapper")
execCommand("ls", "-al", "/dev/disk/by-path")
execCommand("lsscsi")
execCommand("lsscsi", "-t")
execCommand("free")
return err
}
log.Debugf("Paths found: %v", found)
return nil
}
// ScsiDeviceInfo contains information about SCSI devices
type ScsiDeviceInfo struct {
Host string
Channel string
Target string
LUN string
Devices []string
MultipathDevice string
Filesystem string
IQN string
HostSessionMap map[int]int
}
// getDeviceInfoForLUN finds iSCSI devices using /dev/disk/by-path values. This method should be
// called after calling rescanTargetAndWaitForDevice so that the device paths are known to exist.
func getDeviceInfoForLUN(lunID int, iSCSINodeName string) (*ScsiDeviceInfo, error) {
fields := log.Fields{
"lunID": lunID,
"iSCSINodeName": iSCSINodeName,
}
log.WithFields(fields).Debug(">>>> osutils.getDeviceInfoForLUN")
defer log.WithFields(fields).Debug("<<<< osutils.getDeviceInfoForLUN")
hostSessionMap := getISCSIHostSessionMapForTarget(iSCSINodeName)
if len(hostSessionMap) == 0 {
return nil, fmt.Errorf("no iSCSI hosts found for target %s", iSCSINodeName)
}
paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap)
devices, err := getDevicesForLUN(paths)
if nil != err {
return nil, err
} else if 0 == len(devices) {
return nil, fmt.Errorf("scan not completed for LUN %d on target %s", lunID, iSCSINodeName)
}
multipathDevice := ""
for _, device := range devices {
multipathDevice = findMultipathDeviceForDevice(device)
if multipathDevice != "" {
break
}
}
fsType := ""
if multipathDevice != "" {
fsType = getFSType("/dev/" + multipathDevice)
} else {
fsType = getFSType("/dev/" + devices[0])
}
log.WithFields(log.Fields{
"LUN": strconv.Itoa(lunID),
"multipathDevice": multipathDevice,
"fsType": fsType,
"deviceNames": devices,
"hostSessionMap": hostSessionMap,
}).Debug("Found SCSI device.")
info := &ScsiDeviceInfo{
LUN: strconv.Itoa(lunID),
MultipathDevice: multipathDevice,
Devices: devices,
Filesystem: fsType,
IQN: iSCSINodeName,
HostSessionMap: hostSessionMap,
}
return info, nil
}
// getDeviceInfoForMountPath discovers the device that is currently mounted at the specified mount path. It
// uses the ScsiDeviceInfo struct so that it may return a multipath device (if any) plus one or more underlying
// physical devices.
func getDeviceInfoForMountPath(mountpath string) (*ScsiDeviceInfo, error) {
fields := log.Fields{"mountpath": mountpath}
log.WithFields(fields).Debug(">>>> osutils.getDeviceInfoForMountPath")
defer log.WithFields(fields).Debug("<<<< osutils.getDeviceInfoForMountPath")
device, _, err := GetDeviceNameFromMount(mountpath)
if err != nil {
return nil, err
}
device, err = filepath.EvalSymlinks(device)
if err != nil {
return nil, err
}
device = strings.TrimPrefix(device, "/dev/")
var deviceInfo *ScsiDeviceInfo
if !strings.HasPrefix(device, "dm-") {
deviceInfo = &ScsiDeviceInfo{
Devices: []string{device},
}
} else {
deviceInfo = &ScsiDeviceInfo{
Devices: findDevicesForMultipathDevice(device),
MultipathDevice: device,
}
}
log.WithFields(log.Fields{
"multipathDevice": deviceInfo.MultipathDevice,
"devices": deviceInfo.Devices,
}).Debug("Found SCSI device.")
return deviceInfo, nil
}
// waitForMultipathDeviceForLUN waits for a multipath device (if any) to appear for the specified LUN on the given iSCSI target.
func waitForMultipathDeviceForLUN(lunID int, iSCSINodeName string) error {
fields := log.Fields{
"lunID": lunID,
"iSCSINodeName": iSCSINodeName,
}
log.WithFields(fields).Debug(">>>> osutils.waitForMultipathDeviceForLUN")
defer log.WithFields(fields).Debug("<<<< osutils.waitForMultipathDeviceForLUN")
hostSessionMap := getISCSIHostSessionMapForTarget(iSCSINodeName)
if len(hostSessionMap) == 0 {
return fmt.Errorf("no iSCSI hosts found for target %s", iSCSINodeName)
}
paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap)
devices, err := getDevicesForLUN(paths)
if nil != err {
return err
}
waitForMultipathDeviceForDevices(devices)
return nil
}
// waitForMultipathDeviceForDevices accepts a list of sd* device names and waits until
// a multipath device is present for at least one of those. It returns the name of the
// multipath device, or an empty string if multipathd isn't running or there is only one path.
func waitForMultipathDeviceForDevices(devices []string) string {
fields := log.Fields{"devices": devices}
log.WithFields(fields).Debug(">>>> osutils.waitForMultipathDeviceForDevices")
defer log.WithFields(fields).Debug("<<<< osutils.waitForMultipathDeviceForDevices")
if len(devices) <= 1 {
log.Debugf("Skipping multipath discovery, %d device(s) specified.", len(devices))
return ""
} else if !multipathdIsRunning() {
log.Debug("Skipping multipath discovery, multipathd isn't running.")
return ""
}
maxDuration := multipathDeviceDiscoveryTimeoutSecs * time.Second
multipathDevice := ""
checkMultipathDeviceExists := func() error {
for _, device := range devices {
multipathDevice = findMultipathDeviceForDevice(device)
if multipathDevice != "" {
return nil
}
}
if multipathDevice == "" {
return errors.New("multipath device not yet present")
}
return nil
}
deviceNotify := func(err error, duration time.Duration) {
log.WithField("increment", duration).Debug("Multipath device not yet present, waiting.")
}
multipathDeviceBackoff := backoff.NewExponentialBackOff()
multipathDeviceBackoff.InitialInterval = 1 * time.Second
multipathDeviceBackoff.Multiplier = 1.414 // approx sqrt(2)
multipathDeviceBackoff.RandomizationFactor = 0.1
multipathDeviceBackoff.MaxElapsedTime = maxDuration
// Run the check/rescan using an exponential backoff
if err := backoff.RetryNotify(checkMultipathDeviceExists, multipathDeviceBackoff, deviceNotify); err != nil {
log.Warnf("Could not find multipath device after %3.2f seconds.", maxDuration.Seconds())
} else {
log.WithField("multipathDevice", multipathDevice).Debug("Multipath device found.")
}
return multipathDevice
}
// findMultipathDeviceForDevice finds the devicemapper parent of a device name like /dev/sdx.
func findMultipathDeviceForDevice(device string) string {
log.WithField("device", device).Debug(">>>> osutils.findMultipathDeviceForDevice")
defer log.WithField("device", device).Debug("<<<< osutils.findMultipathDeviceForDevice")
holdersDir := "/sys/block/" + device + "/holders"
if dirs, err := ioutil.ReadDir(holdersDir); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "dm-") {
return name
}
}
}
log.WithField("device", device).Debug("Could not find multipath device for device.")
return ""
}
// findDevicesForMultipathDevice finds the constituent devices for a devicemapper parent device like /dev/dm-0.
func findDevicesForMultipathDevice(device string) []string {
log.WithField("device", device).Debug(">>>> osutils.findDevicesForMultipathDevice")
defer log.WithField("device", device).Debug("<<<< osutils.findDevicesForMultipathDevice")
devices := make([]string, 0)
slavesDir := "/sys/block/" + device + "/slaves"
if dirs, err := ioutil.ReadDir(slavesDir); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "sd") {
devices = append(devices, name)
}
}
}
if len(devices) == 0 {
log.WithField("device", device).Debug("Could not find devices for multipath device.")
} else {
log.WithFields(log.Fields{
"device": device,
"devices": devices,
}).Debug("Found devices for multipath device.")
}
return devices
}
// PrepareDeviceForRemoval informs Linux that a device will be removed.
func PrepareDeviceForRemoval(lunID int, iSCSINodeName string) {
fields := log.Fields{
"lunID": lunID,
"iSCSINodeName": iSCSINodeName,
"chrootPathPrefix": chrootPathPrefix,
}
log.WithFields(fields).Debug(">>>> osutils.PrepareDeviceForRemoval")
defer log.WithFields(fields).Debug("<<<< osutils.PrepareDeviceForRemoval")
deviceInfo, err := getDeviceInfoForLUN(lunID, iSCSINodeName)
if err != nil {
log.WithFields(log.Fields{
"error": err,
"lunID": lunID,
}).Info("Could not get device info for removal, skipping host removal steps.")
return
}
removeSCSIDevice(deviceInfo)
}
// PrepareDeviceAtMountPathForRemoval informs Linux that a device will be removed.
func PrepareDeviceAtMountPathForRemoval(mountpoint string, unmount bool) error {
fields := log.Fields{"mountpoint": mountpoint}
log.WithFields(fields).Debug(">>>> osutils.PrepareDeviceAtMountPathForRemoval")
defer log.WithFields(fields).Debug("<<<< osutils.PrepareDeviceAtMountPathForRemoval")
deviceInfo, err := getDeviceInfoForMountPath(mountpoint)
if err != nil {
return err
}
if unmount {
if err := Umount(mountpoint); err != nil {
return err
}
}
removeSCSIDevice(deviceInfo)
return nil
}
// removeSCSIDevice informs Linux that a device will be removed. The deviceInfo provided only needs
// the Devices and MultipathDevice fields set.
func removeSCSIDevice(deviceInfo *ScsiDeviceInfo) {
// Flush multipath device
multipathFlushDevice(deviceInfo)
// Flush devices
flushDevice(deviceInfo)
// Remove device
removeDevice(deviceInfo)
// Give the host a chance to fully process the removal
time.Sleep(time.Second)
}
// ISCSISupported returns true if iscsiadm is installed and in the PATH.
func ISCSISupported() bool {
log.Debug(">>>> osutils.ISCSISupported")
defer log.Debug("<<<< osutils.ISCSISupported")
_, err := execIscsiadmCommand("-V")
if err != nil {
log.Debug("iscsiadm tools not found on this host.")
return false
}
return true
}
// ISCSIDiscoveryInfo contains information about discovered iSCSI targets.
type ISCSIDiscoveryInfo struct {
Portal string
PortalIP string
TargetName string
}
// iSCSIDiscovery uses the 'iscsiadm' command to perform discovery.
func iSCSIDiscovery(portal string) ([]ISCSIDiscoveryInfo, error) {
log.WithField("portal", portal).Debug(">>>> osutils.iSCSIDiscovery")
defer log.Debug("<<<< osutils.iSCSIDiscovery")
out, err := execIscsiadmCommand("-m", "discovery", "-t", "sendtargets", "-p", portal)
if err != nil {
return nil, err
}
/*
iscsiadm -m discovery -t st -p 10.63.152.249:3260
10.63.152.249:3260,1 iqn.1992-08.com.netapp:2752.600a0980006074c20000000056b32c4d
10.63.152.250:3260,2 iqn.1992-08.com.netapp:2752.600a0980006074c20000000056b32c4d
a[0]==10.63.152.249:3260,1
a[1]==iqn.1992-08.com.netapp:2752.600a0980006074c20000000056b32c4d
*/
var discoveryInfo []ISCSIDiscoveryInfo
lines := strings.Split(string(out), "\n")
for _, l := range lines {
a := strings.Fields(l)
if len(a) >= 2 {
portalIP := strings.Split(a[0], ":")[0]
discoveryInfo = append(discoveryInfo, ISCSIDiscoveryInfo{
Portal: a[0],
PortalIP: portalIP,
TargetName: a[1],
})
log.WithFields(log.Fields{
"Portal": a[0],
"PortalIP": portalIP,
"TargetName": a[1],
}).Debug("Adding iSCSI discovery info.")
}
}
return discoveryInfo, nil
}
// ISCSISessionInfo contains information about iSCSI sessions.
type ISCSISessionInfo struct {
SID string
Portal string
PortalIP string
TargetName string
}
// getISCSISessionInfo runs 'iscsiadm -m session' and returns the parsed session information.
func getISCSISessionInfo() ([]ISCSISessionInfo, error) {
log.Debug(">>>> osutils.getISCSISessionInfo")
defer log.Debug("<<<< osutils.getISCSISessionInfo")
out, err := execIscsiadmCommand("-m", "session")
if err != nil {
exitErr, ok := err.(*exec.ExitError)
if ok && exitErr.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() == iSCSIErrNoObjsFound {
log.Debug("No iSCSI session found.")
return []ISCSISessionInfo{}, nil
} else {
log.WithField("error", err).Error("Problem checking iSCSI sessions.")
return nil, err
}
}
/*
# iscsiadm -m session
tcp: [3] 10.0.207.7:3260,1028 iqn.1992-08.com.netapp:sn.afbb1784f77411e582f8080027e22798:vs.3 (non-flash)
tcp: [4] 10.0.207.9:3260,1029 iqn.1992-08.com.netapp:sn.afbb1784f77411e582f8080027e22798:vs.3 (non-flash)
a[0]==tcp:
a[1]==[4]
a[2]==10.0.207.9:3260,1029
a[3]==iqn.1992-08.com.netapp:sn.afbb1784f77411e582f8080027e22798:vs.3
a[4]==(non-flash)
*/
var sessionInfo []ISCSISessionInfo
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
for _, l := range lines {
a := strings.Fields(l)
if len(a) > 3 {
sid := a[1]
sid = sid[1 : len(sid)-1]
portalIP := strings.Split(a[2], ":")[0]
sessionInfo = append(sessionInfo, ISCSISessionInfo{
SID: sid,
Portal: a[2],
PortalIP: portalIP,
TargetName: a[3],
})
log.WithFields(log.Fields{
"SID": sid,
"Portal": a[2],
"PortalIP": portalIP,
"TargetName": a[3],
}).Debug("Adding iSCSI session info.")
}
}
return sessionInfo, nil
}
// ISCSIDisableDelete logs out from the supplied target and removes the iSCSI device.
func ISCSIDisableDelete(targetIQN, targetPortal string) error {
logFields := log.Fields{
"targetIQN": targetIQN,
"targetPortal": targetPortal,
}
log.WithFields(logFields).Debug(">>>> osutils.ISCSIDisableDelete")
defer log.WithFields(logFields).Debug("<<<< osutils.ISCSIDisableDelete")
_, err := execIscsiadmCommand("-m", "node", "-T", targetIQN, "--portal", targetPortal, "-u")
if err != nil {
log.WithField("error", err).Debug("Error during iSCSI logout.")
}
_, err = execIscsiadmCommand("-m", "node", "-o", "delete", "-T", targetIQN)
return err
}
// iSCSISessionExists checks to see if a session exists to the specified portal.
func iSCSISessionExists(portal string) (bool, error) {
log.Debug(">>>> osutils.iSCSISessionExists")
defer log.Debug("<<<< osutils.iSCSISessionExists")
sessionInfo, err := getISCSISessionInfo()
if err != nil {
log.WithField("error", err).Error("Problem checking iSCSI sessions.")
return false, err
}
for _, e := range sessionInfo {
if e.PortalIP == portal {
return true, nil
}
}
return false, nil
}
// iSCSISessionExistsToTargetIQN checks to see if a session exists to the specified target.
func iSCSISessionExistsToTargetIQN(targetIQN string) (bool, error) {
log.Debug(">>>> osutils.iSCSISessionExistsToTargetIQN")
defer log.Debug("<<<< osutils.iSCSISessionExistsToTargetIQN")
sessionInfo, err := getISCSISessionInfo()
if err != nil {
log.WithField("error", err).Error("Problem checking iSCSI sessions.")
return false, err
}
for _, e := range sessionInfo {
if e.TargetName == targetIQN {
return true, nil
}
}
return false, nil
}
// iSCSIRescanTargetLUN rescans a single LUN on an iSCSI target.
func iSCSIRescanTargetLUN(lunID int, hosts []int) error {
fields := log.Fields{"hosts": hosts, "lunID": lunID}
log.WithFields(fields).Debug(">>>> osutils.iSCSIRescanTargetLUN")
defer log.WithFields(fields).Debug("<<<< osutils.iSCSIRescanTargetLUN")
var (
f *os.File
err error
)
for _, hostNumber := range hosts {
filename := fmt.Sprintf("/sys/class/scsi_host/host%d/scan", hostNumber)
if f, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0200); err != nil {
log.WithField("file", filename).Warning("Could not open file for writing.")
return err
}
scanCmd := fmt.Sprintf("0 0 %d", lunID)
if written, err := f.WriteString(scanCmd); err != nil {
log.WithFields(log.Fields{"file": filename, "error": err}).Warning("Could not write to file.")
f.Close()
return err
} else if written == 0 {
log.WithField("file", filename).Warning("No data written to file.")
f.Close()
return fmt.Errorf("no data written to %s", filename)
}
f.Close()
log.WithFields(log.Fields{
"scanCmd": scanCmd,
"scanFile": filename,
}).Debug("Invoked single-LUN rescan.")
}
return nil
}
// isAlreadyAttached checks if there is already an established iSCSI session to the specified LUN.
func isAlreadyAttached(lunID int, targetIqn string) bool {
hostSessionMap := getISCSIHostSessionMapForTarget(targetIqn)
if len(hostSessionMap) == 0 {
return false
}
paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap)
devices, err := getDevicesForLUN(paths)
if nil != err {
return false
}
return 0 < len(devices)
}
// getISCSIHostSessionMapForTarget returns a map of iSCSI host numbers to iSCSI session numbers
// for a given iSCSI target.
func getISCSIHostSessionMapForTarget(iSCSINodeName string) map[int]int {
fields := log.Fields{"iSCSINodeName": iSCSINodeName}
log.WithFields(fields).Debug(">>>> osutils.getISCSIHostSessionMapForTarget")
defer log.WithFields(fields).Debug("<<<< osutils.getISCSIHostSessionMapForTarget")
var (
hostNumber int
sessionNumber int
)
hostSessionMap := make(map[int]int)
sysPath := "/sys/class/iscsi_host/"
if hostDirs, err := ioutil.ReadDir(sysPath); err != nil {
log.WithField("error", err).Errorf("Could not read %s", sysPath)
return hostSessionMap
} else {
for _, hostDir := range hostDirs {
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
} else if hostNumber, err = strconv.Atoi(strings.TrimPrefix(hostName, "host")); err != nil {
log.WithField("host", hostName).Error("Could not parse host number")
continue
}
devicePath := sysPath + hostName + "/device/"
if deviceDirs, err := ioutil.ReadDir(devicePath); err != nil {
log.WithField("error", err).Errorf("Could not read %f", devicePath)
return hostSessionMap
} else {
for _, deviceDir := range deviceDirs {
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
} else if sessionNumber, err = strconv.Atoi(strings.TrimPrefix(sessionName, "session")); err != nil {
log.WithField("session", sessionName).Error("Could not parse session number")
continue
}
targetNamePath := devicePath + sessionName + "/iscsi_session/" + sessionName + "/targetname"
if targetName, err := ioutil.ReadFile(targetNamePath); err != nil {
log.WithFields(log.Fields{
"path": targetNamePath,
"error": err,
}).Error("Could not read targetname file")
} else if strings.TrimSpace(string(targetName)) == iSCSINodeName {
log.WithFields(log.Fields{
"hostNumber": hostNumber,
"sessionNumber": sessionNumber,
}).Debug("Found iSCSI host/session.")
hostSessionMap[hostNumber] = sessionNumber
}
}
}
}
}
return hostSessionMap
}
// GetISCSIDevices returns a list of iSCSI devices that are attached to (but not necessarily mounted on) this host.
func GetISCSIDevices() ([]*ScsiDeviceInfo, error) {
log.Debug(">>>> osutils.GetISCSIDevices")
defer log.Debug("<<<< osutils.GetISCSIDevices")
devices := make([]*ScsiDeviceInfo, 0)
hostSessionMapCache := make(map[string]map[int]int)
// Start by reading the sessions from /sys/class/iscsi_session
sysPath := "/sys/class/iscsi_session/"
sessionDirs, err := ioutil.ReadDir(sysPath)
if err != nil {
log.WithField("error", err).Errorf("Could not read %s", sysPath)
return nil, err
}
// Loop through each of the iSCSI sessions
for _, sessionDir := range sessionDirs {
sessionName := sessionDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
} else if _, err = strconv.Atoi(strings.TrimPrefix(sessionName, "session")); err != nil {
log.WithField("session", sessionName).Error("Could not parse session number")
return nil, err
}
// Find the target IQN from the session at /sys/class/iscsi_session/sessionXXX/targetname
sessionPath := sysPath + sessionName
targetNamePath := sessionPath + "/targetname"
targetNameBytes, err := ioutil.ReadFile(targetNamePath)
if err != nil {
log.WithFields(log.Fields{
"path": targetNamePath,
"error": err,
}).Error("Could not read targetname file")
return nil, err
}
targetIQN := strings.TrimSpace(string(targetNameBytes))
log.WithFields(log.Fields{
"targetIQN": targetIQN,
"sessionName": sessionName,
}).Debug("Found iSCSI session / target IQN.")
// Find the one target at /sys/class/iscsi_session/sessionXXX/device/targetHH:BB:DD (host:bus:device)
sessionDevicePath := sessionPath + "/device/"
targetDirs, err := ioutil.ReadDir(sessionDevicePath)
if err != nil {
log.WithField("error", err).Errorf("Could not read %s", sessionDevicePath)
return nil, err
}
// Get the one target directory
hostBusDeviceName := ""
targetDirName := ""
for _, targetDir := range targetDirs {
targetDirName = targetDir.Name()
if strings.HasPrefix(targetDirName, "target") {
hostBusDeviceName = strings.TrimPrefix(targetDirName, "target")
break
}
}
if hostBusDeviceName == "" {
log.Warningf("Could not find a host:bus:device directory at %s", sessionDevicePath)
continue
}
sessionDeviceHBDPath := sessionDevicePath + targetDirName + "/"
log.WithFields(log.Fields{
"hbdPath": sessionDeviceHBDPath,
"hbdName": hostBusDeviceName,
}).Debug("Found host/bus/device path.")
// Find the devices at /sys/class/iscsi_session/sessionXXX/device/targetHH:BB:DD/HH:BB:DD:LL (host:bus:device:lun)
hostBusDeviceLunDirs, err := ioutil.ReadDir(sessionDeviceHBDPath)
if err != nil {
log.WithField("error", err).Errorf("Could not read %s", sessionDeviceHBDPath)
return nil, err
}
for _, hostBusDeviceLunDir := range hostBusDeviceLunDirs {
hostBusDeviceLunDirName := hostBusDeviceLunDir.Name()
if !strings.HasPrefix(hostBusDeviceLunDirName, hostBusDeviceName) {
continue
}
sessionDeviceHBDLPath := sessionDeviceHBDPath + hostBusDeviceLunDirName + "/"
log.WithFields(log.Fields{
"hbdlPath": sessionDeviceHBDLPath,
"hbdlName": hostBusDeviceLunDirName,
}).Debug("Found host/bus/device/LUN path.")
hbdlValues := strings.Split(hostBusDeviceLunDirName, ":")
if len(hbdlValues) != 4 {
log.Errorf("Could not parse values from %s", hostBusDeviceLunDirName)
return nil, fmt.Errorf("could not parse values from %s", hostBusDeviceLunDirName)
}
hostNum := hbdlValues[0]
busNum := hbdlValues[1]
deviceNum := hbdlValues[2]
lunNum := hbdlValues[3]
blockPath := sessionDeviceHBDLPath + "/block/"
// Find the block device at /sys/class/iscsi_session/sessionXXX/device/targetHH:BB:DD/HH:BB:DD:LL/block
blockDeviceDirs, err := ioutil.ReadDir(blockPath)
if err != nil {
log.WithField("error", err).Errorf("Could not read %s", blockPath)
return nil, err
}
for _, blockDeviceDir := range blockDeviceDirs {
blockDeviceName := blockDeviceDir.Name()
log.WithField("blockDeviceName", blockDeviceName).Debug("Found block device.")
// Find multipath device, if any
var slaveDevices []string
multipathDevice := findMultipathDeviceForDevice(blockDeviceName)
if multipathDevice != "" {
slaveDevices = findDevicesForMultipathDevice(multipathDevice)
} else {
slaveDevices = []string{blockDeviceName}
}
// Get the host/session map, using a cached value if available
hostSessionMap, ok := hostSessionMapCache[targetIQN]
if !ok {
hostSessionMap = getISCSIHostSessionMapForTarget(targetIQN)
hostSessionMapCache[targetIQN] = hostSessionMap
}
log.WithFields(log.Fields{
"host": hostNum,
"lun": lunNum,
"devices": slaveDevices,
"multipathDevice": multipathDevice,
"iqn": targetIQN,
"hostSessionMap": hostSessionMap,
}).Debug("Found iSCSI device.")
device := &ScsiDeviceInfo{
Host: hostNum,
Channel: busNum,
Target: deviceNum,
LUN: lunNum,
Devices: slaveDevices,
MultipathDevice: multipathDevice,
IQN: targetIQN,
HostSessionMap: hostSessionMap,
}
devices = append(devices, device)
}
}
}
return devices, nil
}
// GetMountedISCSIDevices returns a list of iSCSI devices that are *mounted* on this host.
func GetMountedISCSIDevices() ([]*ScsiDeviceInfo, error) {
log.Debug(">>>> osutils.GetMountedISCSIDevices")
defer log.Debug("<<<< osutils.GetMountedISCSIDevices")
procMounts, err := listProcMounts(procMountsPath)
if err != nil {
return nil, err
}
// Get a list of all mounted /dev devices
mountedDevices := make([]string, 0)
for _, procMount := range procMounts {
if !strings.HasPrefix(procMount.Device, "/dev/") {
continue
}
// Resolve any symlinks to get the real device
mountedDevice, err := filepath.EvalSymlinks(procMount.Device)
if err != nil {
log.Error(err)
continue
}
mountedDevices = append(mountedDevices, strings.TrimPrefix(mountedDevice, "/dev/"))
}
// Get all known iSCSI devices
iscsiDevices, err := GetISCSIDevices()
if err != nil {
return nil, err
}
mountedISCSIDevices := make([]*ScsiDeviceInfo, 0)
// For each mounted device, look for a matching iSCSI device
for _, mountedDevice := range mountedDevices {
iSCSIDeviceLoop:
for _, iscsiDevice := range iscsiDevices {
// First look for a multipath device match
if mountedDevice == iscsiDevice.MultipathDevice {
mountedISCSIDevices = append(mountedISCSIDevices, iscsiDevice)
break iSCSIDeviceLoop
} else {
// Then look for a slave device match
for _, iscsiSlaveDevice := range iscsiDevice.Devices {
if mountedDevice == iscsiSlaveDevice {
mountedISCSIDevices = append(mountedISCSIDevices, iscsiDevice)
break iSCSIDeviceLoop
}
}
}
}
}
for _, md := range mountedISCSIDevices {
log.WithFields(log.Fields{
"host": md.Host,
"lun": md.LUN,
"devices": md.Devices,
"multipathDevice": md.MultipathDevice,
"iqn": md.IQN,
"hostSessionMap": md.HostSessionMap,
}).Debug("Found mounted iSCSI device.")
}
return mountedISCSIDevices, nil
}
// ISCSITargetHasMountedDevice returns true if this host has any mounted devices on the specified target.
func ISCSITargetHasMountedDevice(targetIQN string) (bool, error) {
mountedISCSIDevices, err := GetMountedISCSIDevices()
if err != nil {
return false, err
}
for _, device := range mountedISCSIDevices {
if device.IQN == targetIQN {
return true, nil
}
}
return false, nil
}
// multipathFlushDevice invokes the 'multipath' command to flush all paths for a single device.
func multipathFlushDevice(deviceInfo *ScsiDeviceInfo) {
log.WithField("device", deviceInfo.MultipathDevice).Debug(">>>> osutils.multipathFlushDevice")
defer log.Debug("<<<< osutils.multipathFlushDevice")
if deviceInfo.MultipathDevice == "" {
return
}
_, err := execCommandWithTimeout("multipath", 30, "-f", "/dev/"+deviceInfo.MultipathDevice)
if err != nil {
// there is nothing more we can do if this fails, but log the error
log.WithFields(log.Fields{
"device": deviceInfo.MultipathDevice,
"error": err,
}).Warning("Error encountered in multipath flush device command.")
}
}
// flushDevice flushes any outstanding I/O to all paths to a device.
func flushDevice(deviceInfo *ScsiDeviceInfo) {
log.Debug(">>>> osutils.flushDevice")
defer log.Debug("<<<< osutils.flushDevice")
for _, device := range deviceInfo.Devices {
_, err := execCommandWithTimeout("blockdev", 5, "--flushbufs", "/dev/"+device)
if err != nil {
// there is nothing more we can do if this fails, but log the error
log.WithFields(log.Fields{
"device": device,
"error": err,
}).Warning("Error encountered in blockdev --flushbufs command.")
}
}
}
// removeDevice tells Linux that a device will be removed.
func removeDevice(deviceInfo *ScsiDeviceInfo) {
log.Debug(">>>> osutils.removeDevice")
defer log.Debug("<<<< osutils.removeDevice")
var (
f *os.File
err error
)
for _, deviceName := range deviceInfo.Devices {
filename := fmt.Sprintf("/sys/block/%s/device/delete", deviceName)
if f, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0200); err != nil {
log.WithField("file", filename).Warning("Could not open file for writing.")
return
}
if written, err := f.WriteString("1"); err != nil {
log.WithFields(log.Fields{"file": filename, "error": err}).Warning("Could not write to file.")
f.Close()
return
} else if written == 0 {
log.WithField("file", filename).Warning("No data written to file.")
f.Close()
return
}
f.Close()
log.WithField("scanFile", filename).Debug("Invoked device delete.")
}
}
// multipathdIsRunning returns true if the multipath daemon is running.
func multipathdIsRunning() bool {
log.Debug(">>>> osutils.multipathdIsRunning")
defer log.Debug("<<<< osutils.multipathdIsRunning")
out, err := execCommand("pgrep", "multipathd")
if err == nil {
pid := strings.TrimSpace(string(out))
if pidRegex.MatchString(pid) {
log.WithField("pid", pid).Debug("multipathd is running")
return true
}
} else {
log.Error(err)
}
out, err = execCommand("multipathd", "show", "daemon")
if err == nil {
if pidRunningRegex.MatchString(string(out)) {
log.Debug("multipathd is running")
return true
}
} else {
log.Error(err)
}
return false
}
// getFSType returns the filesystem for the supplied device.
func getFSType(device string) string {
log.WithField("device", device).Debug(">>>> osutils.getFSType")
defer log.Debug("<<<< osutils.getFSType")
fsType := ""
out, err := execCommand("blkid", device)
if err != nil {
log.WithField("device", device).Debug("Could not get FSType for device.")
return fsType
}
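// blkid output typically looks like:
//   /dev/sdb: UUID="..." TYPE="ext4"
// so the TYPE= token is picked out below.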
if strings.Contains(string(out), "TYPE=") {
for _, v := range strings.Split(string(out), " ") {
if strings.Contains(v, "TYPE=") {
fsType = strings.Split(v, "=")[1]
fsType = strings.Replace(fsType, "\"", "", -1)
fsType = strings.TrimSpace(fsType)
}
}
}
return fsType
}
// formatVolume creates a filesystem for the supplied device of the supplied type.
func formatVolume(device, fstype string) error {
logFields := log.Fields{"device": device, "fsType": fstype}
log.WithFields(logFields).Debug(">>>> osutils.formatVolume")
defer log.WithFields(logFields).Debug("<<<< osutils.formatVolume")
maxDuration := 30 * time.Second
formatVolume := func() error {
var err error
switch fstype {
case "xfs":
_, err = execCommand("mkfs.xfs", "-f", device)
case "ext3":
_, err = execCommand("mkfs.ext3", "-F", device)
case "ext4":
_, err = execCommand("mkfs.ext4", "-F", device)
default:
return fmt.Errorf("unsupported file system type: %s", fstype)
}
return err
}
formatNotify := func(err error, duration time.Duration) {
log.WithField("increment", duration).Debug("Format failed, retrying.")
}
formatBackoff := backoff.NewExponentialBackOff()
formatBackoff.InitialInterval = 2 * time.Second
formatBackoff.Multiplier = 2
formatBackoff.RandomizationFactor = 0.1
formatBackoff.MaxElapsedTime = maxDuration
// Run the check/rescan using an exponential backoff
if err := backoff.RetryNotify(formatVolume, formatBackoff, formatNotify); err != nil {
log.Warnf("Could not format device after %3.2f seconds.", maxDuration.Seconds())
return err
}
log.WithFields(logFields).Info("Device formatted.")
return nil
}
// MountDevice attaches the supplied device at the supplied location. Use this for iSCSI devices.
func MountDevice(device, mountpoint, options string) (err error) {
log.WithFields(log.Fields{
"device": device,
"mountpoint": mountpoint,
"options": options,
}).Debug(">>>> osutils.MountDevice")
defer log.Debug("<<<< osutils.MountDevice")
// Build the command
var args []string
if len(options) > 0 {
args = []string{"-o", strings.TrimPrefix(options, "-o "), device, mountpoint}
} else {
args = []string{device, mountpoint}
}
if _, err = execCommand("mkdir", "-p", mountpoint); err != nil {
log.WithField("error", err).Warning("Mkdir failed.")
}
if _, err = execCommand("mount", args...); err != nil {
log.WithField("error", err).Error("Mount failed.")
}
return
}
// mountNFSPath attaches the supplied NFS share at the supplied location with options.
func mountNFSPath(exportPath, mountpoint, options string) (err error) {
log.WithFields(log.Fields{
"exportPath": exportPath,
"mountpoint": mountpoint,
"options": options,
}).Debug(">>>> osutils.mountNFSPath")
defer log.Debug("<<<< osutils.mountNFSPath")
// Build the command
var args []string
if len(options) > 0 {
args = []string{"-t", "nfs", "-o", strings.TrimPrefix(options, "-o "), exportPath, mountpoint}
} else {
args = []string{"-t", "nfs", exportPath, mountpoint}
}
// Create the mount point dir if necessary
if _, err = execCommand("mkdir", "-p", mountpoint); err != nil {
log.WithField("error", err).Warning("Mkdir failed.")
}
if out, err := execCommand("mount", args...); err != nil {
log.WithField("output", string(out)).Debug("Mount failed.")
return fmt.Errorf("error mounting NFS volume %v on mountpoint %v: %v", exportPath, mountpoint, err)
}
return nil
}
// Umount detaches from the supplied location.
func Umount(mountpoint string) (err error) {
log.WithField("mountpoint", mountpoint).Debug(">>>> osutils.Umount")
defer log.Debug("<<<< osutils.Umount")
if _, err = execCommand("umount", mountpoint); err != nil {
log.WithField("error", err).Error("Umount failed.")
}
return
}
// loginISCSITarget logs in to an iSCSI target.
func loginISCSITarget(iqn, portal string) error {
log.WithFields(log.Fields{
"IQN": iqn,
"Portal": portal,
}).Debug(">>>> osutils.loginISCSITarget")
defer log.Debug("<<<< osutils.loginISCSITarget")
args := []string{"-m", "node", "-T", iqn, "-l", "-p", portal + ":3260"}
if _, err := execIscsiadmCommand(args...); err != nil {
log.WithField("error", err).Error("Error logging in to iSCSI target.")
return err
}
return nil
}
// loginWithChap logs in to the iSCSI target using the supplied CHAP credentials.
func loginWithChap(tiqn, portal, username, password, iface string, logSensitiveInfo bool) error {
logFields := log.Fields{
"IQN": tiqn,
"portal": portal,
"username": username,
"password": "****",
"iface": iface,
}
if logSensitiveInfo {
logFields["password"] = password
}
log.WithFields(logFields).Debug(">>>> osutils.loginWithChap")
defer log.Debug("<<<< osutils.loginWithChap")
args := []string{"-m", "node", "-T", tiqn, "-p", portal + ":3260"}
createArgs := append(args, []string{"--interface", iface, "--op", "new"}...)
if _, err := execIscsiadmCommand(createArgs...); err != nil {
log.Error("Error running iscsiadm node create.")
return err
}
authMethodArgs := append(args, []string{"--op=update", "--name", "node.session.auth.authmethod", "--value=CHAP"}...)
if _, err := execIscsiadmCommand(authMethodArgs...); err != nil {
log.Error("Error running iscsiadm set authmethod.")
return err
}
authUserArgs := append(args, []string{"--op=update", "--name", "node.session.auth.username", "--value=" + username}...)
if _, err := execIscsiadmCommand(authUserArgs...); err != nil {
log.Error("Error running iscsiadm set authuser.")
return err
}
authPasswordArgs := append(args, []string{"--op=update", "--name", "node.session.auth.password", "--value=" + password}...)
if _, err := execIscsiadmCommand(authPasswordArgs...); err != nil {
log.Error("Error running iscsiadm set authpassword.")
return err
}
loginArgs := append(args, []string{"--login"}...)
if _, err := execIscsiadmCommand(loginArgs...); err != nil {
log.Error("Error running iscsiadm login.")
return err
}
return nil
}
func EnsureISCSISession(hostDataIP string) error {
log.WithField("hostDataIP", hostDataIP).Debug(">>>> osutils.EnsureISCSISession")
defer log.Debug("<<<< osutils.EnsureISCSISession")
// Ensure iSCSI is supported on system
if !ISCSISupported() {
return errors.New("iSCSI support not detected")
}
// Ensure iSCSI session exists for the specified iSCSI portal
sessionExists, err := iSCSISessionExists(hostDataIP)
if err != nil {
return fmt.Errorf("could not check for iSCSI session: %v", err)
}
if !sessionExists {
// Run discovery in case we haven't seen this target from this host
targets, err := iSCSIDiscovery(hostDataIP)
if err != nil {
return fmt.Errorf("could not run iSCSI discovery: %v", err)
}
if len(targets) == 0 {
return errors.New("iSCSI discovery found no targets")
}
log.WithFields(log.Fields{
"Targets": targets,
}).Debug("Found matching iSCSI targets.")
// Determine which target matches the portal we requested
targetIndex := -1
for i, target := range targets {
if target.PortalIP == hostDataIP {
targetIndex = i
break
}
}
if targetIndex == -1 {
return fmt.Errorf("iSCSI discovery found no targets with portal %s", hostDataIP)
}
// To enable multipath, log in to each discovered target with the same IQN (target name)
targetName := targets[targetIndex].TargetName
for _, target := range targets {
if target.TargetName == targetName {
// Log in to target
err = loginISCSITarget(target.TargetName, target.PortalIP)
if err != nil {
return fmt.Errorf("login to iSCSI target failed: %v", err)
}
}
}
// Recheck to ensure a session is now open
sessionExists, err = iSCSISessionExists(hostDataIP)
if err != nil {
return fmt.Errorf("could not recheck for iSCSI session: %v", err)
}
if !sessionExists {
return fmt.Errorf("expected iSCSI session %v NOT found, please login to the iSCSI portal", hostDataIP)
}
}
log.WithField("hostDataIP", hostDataIP).Debug("Found session to iSCSI portal.")
return nil
}
// execIscsiadmCommand uses the 'iscsiadm' command to perform operations
func execIscsiadmCommand(args ...string) ([]byte, error) {
return execCommand("iscsiadm", args...)
}
// execCommand invokes an external process
func execCommand(name string, args ...string) ([]byte, error) {
log.WithFields(log.Fields{
"command": name,
"args": args,
}).Debug(">>>> osutils.execCommand.")
out, err := exec.Command(name, args...).CombinedOutput()
log.WithFields(log.Fields{
"command": name,
"output": sanitizeString(string(out)),
"error": err,
}).Debug("<<<< osutils.execCommand.")
return out, err
}
// execCommandResult is used to return shell command results via channels between goroutines
type execCommandResult struct {
Output []byte
Error error
}
// execCommandWithTimeout invokes an external command and kills it if it does not finish within the given timeout
func execCommandWithTimeout(name string, timeoutSeconds time.Duration, args ...string) ([]byte, error) {
timeout := timeoutSeconds * time.Second
log.WithFields(log.Fields{
"command": name,
"timeoutSeconds": timeout,
"args": args,
}).Debug(">>>> osutils.execCommandWithTimeout.")
cmd := exec.Command(name, args...)
done := make(chan execCommandResult, 1)
var result execCommandResult
go func() {
out, err := cmd.CombinedOutput()
done <- execCommandResult{Output: out, Error: err}
}()
select {
case <-time.After(timeout):
if err := cmd.Process.Kill(); err != nil {
log.WithFields(log.Fields{
"process": name,
"error": err,
}).Error("failed to kill process")
result = execCommandResult{Output: nil, Error: err}
} else {
log.WithFields(log.Fields{
"process": name,
}).Error("process killed after timeout")
result = execCommandResult{Output: nil, Error: errors.New("process killed after timeout")}
}
case result = <-done:
break
}
log.WithFields(log.Fields{
"command": name,
"output": sanitizeString(string(result.Output)),
"error": result.Error,
}).Debug("<<<< osutils.execCommandWithTimeout.")
return result.Output, result.Error
}
func sanitizeString(s string) string {
// Strip xterm color & movement characters
s = xtermControlRegex.ReplaceAllString(s, "")
// Strip trailing newline
s = strings.TrimSuffix(s, "\n")
return s
}
|
[
"\"DOCKER_PLUGIN_MODE\""
] |
[] |
[
"DOCKER_PLUGIN_MODE"
] |
[]
|
["DOCKER_PLUGIN_MODE"]
|
go
| 1 | 0 | |
app/controllers/ApisController.go
|
package controllers
import (
"net/http"
"os"
"../lib"
)
// APIInfo holds the API name and version reported by GetAPIInfo.
type APIInfo struct {
Name string `json:"name"`
Version string `json:"version"`
}
// GetAPIInfo responds with the API name and version taken from the API_TITLE and API_VERSION environment variables.
func GetAPIInfo(w http.ResponseWriter, r *http.Request) {
res := lib.Response{ResponseWriter: w}
res.SendOK(APIInfo{
Name: os.Getenv("API_TITLE"),
Version: os.Getenv("API_VERSION"),
})
}
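// Illustrative wiring sketch (assumed, not defined in this file): with the standard
// library router this handler could be registered as, e.g.,
//
//   http.HandleFunc("/api", controllers.GetAPIInfo)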
|
[
"\"API_TITLE\"",
"\"API_VERSION\""
] |
[] |
[
"API_TITLE",
"API_VERSION"
] |
[]
|
["API_TITLE", "API_VERSION"]
|
go
| 2 | 0 | |
run_classifier.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import time
import collections
import csv
import modeling
import optimization
import tokenization
import tensorflow as tf
import numpy as np
from logger import Logger
flags = tf.flags
FLAGS = flags.FLAGS
l2_reg_lambda = 5e-3
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def ranking_loss(labels, logits, batch_size):
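"""Pairwise ranking loss over a batch (cf. dos Santos et al., 2015).
For each example i, splus is the score of the gold class and sminus is the score of
the highest-scoring competing class. The per-example loss
log(1 + exp(lm * (m_plus - splus))) + log(1 + exp(lm * (m_minus + sminus)))
pushes the gold score above m_plus and the competing score below -m_minus, with lm
acting as a scaling factor. The result is averaged over the batch.
"""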
lm = tf.constant(2.0) # lambda
m_plus = tf.constant(2.5)
m_minus = tf.constant(0.5)
L = tf.constant(0.0)
i = tf.constant(0)
cond = lambda i, L: tf.less(i, batch_size)
def loop_body(i, L):
cplus = labels[i] # positive class label index
# taking most informative negative class, use 2nd argmax
_, cminus_indices = tf.nn.top_k(logits[i, :], k=2)
cminus = tf.cond(tf.equal(cplus, cminus_indices[0]),
lambda: cminus_indices[1], lambda: cminus_indices[0])
splus = logits[i, cplus] # score for gold class
sminus = logits[i, cminus] # score for negative class
l = tf.log((1.0 + tf.exp((lm * (m_plus - splus))))) + tf.log((1.0 + tf.exp((lm * (m_minus + sminus)))))
return [tf.add(i, 1), tf.add(L, l)]
_, L = tf.while_loop(cond, loop_body, loop_vars=[i, L])
nbatch = tf.to_float(batch_size)
L = L / nbatch
return L
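# Illustrative use (assumption, not shown in this section): given integer label ids and
# per-class logits of shape [batch_size, num_labels], this can replace the usual softmax
# cross-entropy, e.g. loss = ranking_loss(label_ids, logits, train_batch_size).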
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a,
text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
e11_p, e12_p, e21_p, e22_p, # tqx
si_id,
si_mask,
e1_mask,e2_mask,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.e11_p = e11_p # tqx
self.e12_p = e12_p # tqx
self.e21_p = e21_p # tqx
self.e22_p = e22_p # tqx
self.si_id = si_id #tqx
self.si_mask = si_mask
self.e1_mask = e1_mask
self.e2_mask = e2_mask
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "mytrain0706.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "mytest0706.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "mytest0706.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['Other',
'Message-Topic(e1,e2)', 'Message-Topic(e2,e1)',
'Product-Producer(e1,e2)', 'Product-Producer(e2,e1)',
'Instrument-Agency(e1,e2)', 'Instrument-Agency(e2,e1)',
'Entity-Destination(e1,e2)', 'Entity-Destination(e2,e1)',
'Cause-Effect(e1,e2)', 'Cause-Effect(e2,e1)',
'Component-Whole(e1,e2)', 'Component-Whole(e2,e1)',
'Entity-Origin(e1,e2)', 'Entity-Origin(e2,e1)',
'Member-Collection(e1,e2)', 'Member-Collection(e2,e1)',
'Content-Container(e1,e2)', 'Content-Container(e2,e1)']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "Other"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a,
#e11_p=e11_p, e12_p=e12_p, e21_p=e21_p, e22_p=e22_p,
#e1_mask=e1_mask, e2_mask=e2_mask,
text_b=text_b, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
e11_p=0, #tqx
e12_p=0, #tqx
e21_p=0, #tqx
e22_p=0, #tqx
si_id=0, #tqx
si_mask=[0] * max_seq_length,
e1_mask=[0] * max_seq_length, # tqx
e2_mask=[0] * max_seq_length, # tqx
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
e11_p = tokens_a.index("##11") + 2 ## start position of entity1
e12_p = tokens_a.index("##12") - 1 ## end position of entity1
e21_p = tokens_a.index("##21") + 2 ## start position of entity2
e22_p = tokens_a.index("##22") - 1 ## end position of entity2
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
si1_p = tokens_b.index("#")
si2_p = tokens_b.index("$")
si_id_start = 1 + len(tokens_a) + 1 + si1_p #+1
si_id = 1 + len(tokens_a) + 1 + si2_p
### The "$" symbol is included in the count, to handle the case with no indicator word; otherwise, do not subtract 1
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
si_mask = [0] * si_id_start
while len(si_mask) <= si_id:
si_mask.append(1)
while len(si_mask) < max_seq_length:
si_mask.append(0)
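# e1_mask / e2_mask below are span-indicator vectors over the padded sequence: zeros up
# to the entity start, ones across the entity span, zeros afterwards. For example
# (illustrative), e11_p=4 and e12_p=6 yield [0, 0, 0, 0, 1, 1, 1, 0, ...].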
e1_mask = [0] * e11_p
e2_mask = [0] * e21_p
while len(e1_mask) <= e12_p:
e1_mask.append(1)
while len(e1_mask) < max_seq_length:
e1_mask.append(0)
while len(e2_mask) <= e22_p:
e2_mask.append(1)
while len(e2_mask) < max_seq_length:
e2_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(si_mask) == max_seq_length
assert len(e1_mask) == max_seq_length
assert len(e2_mask) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
#tqx
tf.logging.info("e11_p: %s" % e11_p)
tf.logging.info("e12_p: %s" % e12_p)
tf.logging.info("e21_p: %s" % e21_p)
tf.logging.info("e22_p: %s" % e22_p)
tf.logging.info("si_id: %s" % si_id)
tf.logging.info("si_mask: %s" % " ".join([str(x) for x in si_mask]))
tf.logging.info("e1_mask: %s" % " ".join([str(x) for x in e1_mask]))
tf.logging.info("e2_mask: %s" % " ".join([str(x) for x in e2_mask]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
e11_p = e11_p,
e12_p = e12_p,
e21_p = e21_p,
e22_p = e22_p,
si_id = si_id,
si_mask = si_mask,
e1_mask = e1_mask,
e2_mask = e2_mask,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["e11_p"] = create_int_feature([int(feature.e11_p)]) #tqx
features["e12_p"] = create_int_feature([int(feature.e12_p)]) #tqx
features["e21_p"] = create_int_feature([int(feature.e21_p)]) #tqx
features["e22_p"] = create_int_feature([int(feature.e22_p)]) #tqx
features["e1_mask"] = create_int_feature(feature.e1_mask) #tqx
features["e2_mask"] = create_int_feature(feature.e2_mask) #tqx
features["si_id"] = create_int_feature([int(feature.si_id)]) #tqx
features["si_mask"] = create_int_feature(feature.si_mask) # tqx
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"e11_p": tf.FixedLenFeature([], tf.int64), #tqx
"e12_p": tf.FixedLenFeature([], tf.int64), #tqx
"e21_p": tf.FixedLenFeature([], tf.int64), #tqx
"e22_p": tf.FixedLenFeature([], tf.int64), #tqx
"si_id": tf.FixedLenFeature([], tf.int64), #tqx
"si_mask": tf.FixedLenFeature([seq_length], tf.int64),
"e1_mask": tf.FixedLenFeature([seq_length], tf.int64),
"e2_mask": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
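# A small worked example of the heuristic above (illustrative only): with max_length=8,
# len(tokens_a)=6 and len(tokens_b)=5, the loop pops from tokens_a, then tokens_b, then
# tokens_a again, leaving 4 and 4 tokens (total 8 <= max_length).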
def _truncate_seq_pair_10_text_b(tokens_a, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
if len(tokens_a) <= max_length:
break
else:
tokens_a.pop()
def create_model(bert_config, is_training, input_ids,
e11_p,e12_p,e21_p,e22_p,#tqx
si_id,
si_mask,
e1_mask, e2_mask,
input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
e11_p=e11_p, e12_p=e12_p, e21_p=e21_p, e22_p=e22_p, # tqx
si_id=si_id,
si_mask=si_mask,
e1_mask=e1_mask, e2_mask=e2_mask,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
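# Reading of the loss below (taken from this code, not an external reference): since
# log_probs are <= 0, reduce_min over one_hot_labels[:, 1:] * log_probs[:, 1:] extracts
# -log p(true label) for every non-"Other" example (and 0 for "Other"), while rc_loss
# additionally penalizes the highest-scoring wrong non-"Other" class. l2_reg_lambda is
# assumed to be defined earlier in this script.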
per_example_loss = - tf.reduce_min(one_hot_labels[:,1:] * log_probs[:,1:], axis=-1)
l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
rc_probabilities = probabilities - probabilities * one_hot_labels
second_pre = - tf.reduce_max(rc_probabilities[:, 1:], axis=-1) + 1
rc_loss = - tf.math.log(second_pre)
loss = tf.reduce_sum(per_example_loss) + 5 * tf.reduce_sum(rc_loss) + l2 * l2_reg_lambda
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
e11_p = features["e11_p"] #tqx
e12_p = features["e12_p"] #tqx
e21_p = features["e21_p"] #tqx
e22_p = features["e22_p"] #tqx
si_id = features["si_id"] #tqx
si_mask = features["si_mask"]
e1_mask = features["e1_mask"] #tqx
e2_mask = features["e2_mask"] #tqx
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids,
e11_p, e12_p, e21_p, e22_p, # tqx
si_id,
si_mask,
e1_mask, e2_mask,
input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_e11_p = [] #tqx
all_e12_p = [] #tqx
all_e21_p = [] #tqx
all_e22_p = [] #tqx
all_si_id = [] #tqx
all_si_mask = []
all_e1_mask = []
all_e2_mask = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_e11_p.append(feature.e11_p) #tqx
all_e12_p.append(feature.e12_p) #tqx
all_e21_p.append(feature.e21_p) #tqx
all_e22_p.append(feature.e22_p) #tqx
all_si_id.append(feature.si_id) #tqx
all_si_mask.append(feature.si_mask)
all_e1_mask.append(feature.e1_mask)
all_e2_mask.append(feature.e2_mask)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
#tqx
"e11_p":
tf.constant(all_e11_p, shape=[num_examples], dtype=tf.int32),
"e12_p":
tf.constant(all_e21_p, shape=[num_examples], dtype=tf.int32),
"e21_p":
tf.constant(all_e12_p, shape=[num_examples], dtype=tf.int32),
"e22_p":
tf.constant(all_e22_p, shape=[num_examples], dtype=tf.int32),
"si_id":
tf.constant(all_si_id, shape=[num_examples], dtype=tf.int32),
"si_mask":
tf.constant(
all_si_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"e1_mask":
tf.constant(
all_e1_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"e2_mask":
tf.constant(
all_e2_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.set_random_seed(12345)
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"mrpc": MrpcProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
checkpoint_dir = os.path.abspath(os.path.join(os.path.curdir, "checkpoints"))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
logger = Logger(out_dir)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
# Log the training loss during training
tensors_to_log = {"train loss": "loss/Sum"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
estimator.train(input_fn=train_input_fn,
hooks=[logging_hook],
max_steps=num_train_steps)
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(
predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
pres = []
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
pres.append(probabilities)
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
pres = np.argmax(pres, axis=-1)
pres = np.array(pres, dtype='int')
logger.logging_eval(100, 100, 100, pres)
assert num_written_lines == num_actual_predict_examples
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(
predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
pres = []
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
pres.append(probabilities)
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
pres = np.argmax(pres, axis=-1)
pres = np.array(pres, dtype='int')
logger.logging_eval(100, 100, 100, pres)
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
engine/etcdng/etcd_test.go
|
package etcdng
import (
"os"
"strings"
"testing"
"github.com/mailgun/vulcand/Godeps/_workspace/src/github.com/mailgun/go-etcd/etcd"
"github.com/mailgun/vulcand/Godeps/_workspace/src/github.com/mailgun/log"
"github.com/mailgun/vulcand/engine/test"
"github.com/mailgun/vulcand/plugin/registry"
"github.com/mailgun/vulcand/secret"
. "github.com/mailgun/vulcand/Godeps/_workspace/src/gopkg.in/check.v1"
)
func TestEtcd(t *testing.T) { TestingT(t) }
type EtcdSuite struct {
ng *ng
suite test.EngineSuite
nodes []string
etcdPrefix string
consistency string
client *etcd.Client
changesC chan interface{}
key string
stopC chan bool
}
var _ = Suite(&EtcdSuite{
etcdPrefix: "/vulcandtest",
consistency: etcd.STRONG_CONSISTENCY,
})
func (s *EtcdSuite) SetUpSuite(c *C) {
log.Init([]*log.LogConfig{&log.LogConfig{Name: "console"}})
key, err := secret.NewKeyString()
if err != nil {
panic(err)
}
s.key = key
nodes_string := os.Getenv("VULCAND_TEST_ETCD_NODES")
if nodes_string == "" {
// Skips the entire suite
c.Skip("This test requires etcd, provide comma separated nodes in VULCAND_TEST_ETCD_NODES environment variable")
return
}
s.nodes = strings.Split(nodes_string, ",")
}
func (s *EtcdSuite) SetUpTest(c *C) {
// Initiate a backend with a registry
key, err := secret.KeyFromString(s.key)
c.Assert(err, IsNil)
box, err := secret.NewBox(key)
c.Assert(err, IsNil)
engine, err := New(
s.nodes,
s.etcdPrefix,
registry.GetRegistry(),
Options{
EtcdConsistency: s.consistency,
Box: box,
})
c.Assert(err, IsNil)
s.ng = engine.(*ng)
s.client = s.ng.client
// Delete all values under the given prefix
_, err = s.client.Get(s.etcdPrefix, false, false)
if err != nil {
// There's no key like this
if !notFound(err) {
// We haven't expected this error, oops
c.Assert(err, IsNil)
}
} else {
_, err = s.ng.client.Delete(s.etcdPrefix, true)
c.Assert(err, IsNil)
}
s.changesC = make(chan interface{})
s.stopC = make(chan bool)
go s.ng.Subscribe(s.changesC, s.stopC)
s.suite.ChangesC = s.changesC
s.suite.Engine = engine
}
func (s *EtcdSuite) TearDownTest(c *C) {
close(s.stopC)
s.ng.Close()
}
func (s *EtcdSuite) TestEmptyParams(c *C) {
s.suite.EmptyParams(c)
}
func (s *EtcdSuite) TestHostCRUD(c *C) {
s.suite.HostCRUD(c)
}
func (s *EtcdSuite) TestHostWithKeyPair(c *C) {
s.suite.HostWithKeyPair(c)
}
func (s *EtcdSuite) TestHostUpsertKeyPair(c *C) {
s.suite.HostUpsertKeyPair(c)
}
func (s *EtcdSuite) TestHostWithOCSP(c *C) {
s.suite.HostWithOCSP(c)
}
func (s *EtcdSuite) TestListenerCRUD(c *C) {
s.suite.ListenerCRUD(c)
}
func (s *EtcdSuite) TestListenerSettingsCRUD(c *C) {
s.suite.ListenerSettingsCRUD(c)
}
func (s *EtcdSuite) TestBackendCRUD(c *C) {
s.suite.BackendCRUD(c)
}
func (s *EtcdSuite) TestBackendDeleteUsed(c *C) {
s.suite.BackendDeleteUsed(c)
}
func (s *EtcdSuite) TestServerCRUD(c *C) {
s.suite.ServerCRUD(c)
}
func (s *EtcdSuite) TestServerExpire(c *C) {
s.suite.ServerExpire(c)
}
func (s *EtcdSuite) TestFrontendCRUD(c *C) {
s.suite.FrontendCRUD(c)
}
func (s *EtcdSuite) TestFrontendExpire(c *C) {
s.suite.FrontendExpire(c)
}
func (s *EtcdSuite) TestFrontendBadBackend(c *C) {
s.suite.FrontendBadBackend(c)
}
func (s *EtcdSuite) TestMiddlewareCRUD(c *C) {
s.suite.MiddlewareCRUD(c)
}
func (s *EtcdSuite) TestMiddlewareExpire(c *C) {
s.suite.MiddlewareExpire(c)
}
func (s *EtcdSuite) TestMiddlewareBadFrontend(c *C) {
s.suite.MiddlewareBadFrontend(c)
}
func (s *EtcdSuite) TestMiddlewareBadType(c *C) {
s.suite.MiddlewareBadType(c)
}
|
[
"\"VULCAND_TEST_ETCD_NODES\""
] |
[] |
[
"VULCAND_TEST_ETCD_NODES"
] |
[]
|
["VULCAND_TEST_ETCD_NODES"]
|
go
| 1 | 0 | |
train_pytorch_ctc.py
|
from __future__ import print_function
import argparse
import random
import torch
# import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
# from warpctc_pytorch import CTCLoss
from torch.nn import CTCLoss
import utils
import mydataset
import crnn as crnn
import config
from online_test import val_model, val_model_text_screen
from tqdm import tqdm
# config.imgW = 800
config.alphabet = config.alphabet_v2
config.nclass = len(config.alphabet) + 1
config.saved_model_prefix = 'CRNN-4000-epooch'
config.train_infofile = "/media/nuptn1/JUPYTER/NghiaHT5/Vietnam_OCR/dataset/train_dataset.txt"
config.val_infofile = "/media/nuptn1/JUPYTER/NghiaHT5/Vietnam_OCR/dataset/val_dataset.txt"
config.keep_ratio = True
config.use_log = True
config.pretrained_model = ""
config.batchSize = 80
config.workers = 10
config.adam = True
# config.lr = 0.00003
import os
import datetime
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
log_filename = os.path.join('log/','loss_acc-'+config.saved_model_prefix + '.log')
if not os.path.exists('debug_files'):
os.mkdir('debug_files')
if not os.path.exists(config.saved_model_dir):
os.mkdir(config.saved_model_dir)
if config.use_log and not os.path.exists('log'):
os.mkdir('log')
if config.use_log and os.path.exists(log_filename):
os.remove(log_filename)
if config.experiment is None:
config.experiment = 'expr'
if not os.path.exists(config.experiment):
os.mkdir(config.experiment)
config.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", config.manualSeed)
random.seed(config.manualSeed)
np.random.seed(config.manualSeed)
torch.manual_seed(config.manualSeed)
# cudnn.benchmark = True
train_dataset = mydataset.MyDataset(info_filename=config.train_infofile)
assert train_dataset
if not config.random_sample:
sampler = mydataset.randomSequentialSampler(train_dataset, config.batchSize)
else:
sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=config.batchSize,
shuffle=True, sampler=sampler,
num_workers=int(config.workers),
collate_fn=mydataset.alignCollate(imgH=config.imgH, imgW=config.imgW, keep_ratio=config.keep_ratio))
test_dataset = mydataset.MyDataset(
info_filename=config.val_infofile, transform=mydataset.resizeNormalize((config.imgW, config.imgH), is_test=True))
converter = utils.strLabelConverter(config.alphabet)
# criterion = CTCLoss(reduction='sum',zero_infinity=True)
criterion = CTCLoss(reduction='sum')
best_acc = -1
# custom weights initialization called on crnn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
crnn = crnn.CRNN(config.imgH, config.nc, config.nclass, config.nh)
if config.pretrained_model!='' and os.path.exists(config.pretrained_model):
print('loading pretrained model from %s' % config.pretrained_model)
crnn.load_state_dict(torch.load(config.pretrained_model))
else:
crnn.apply(weights_init)
# print(crnn)
# image = torch.FloatTensor(config.batchSize, 3, config.imgH, config.imgH)
# text = torch.IntTensor(config.batchSize * 5)
# length = torch.IntTensor(config.batchSize)
device = torch.device('cpu')
if config.cuda:
crnn.cuda()
# crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
# image = image.cuda()
device = torch.device('cuda:0')
criterion = criterion.cuda()
# image = Variable(image)
# text = Variable(text)
# length = Variable(length)
# loss averager
loss_avg = utils.averager()
# setup optimizer
if config.adam:
optimizer = optim.Adam(crnn.parameters(), lr=config.lr, betas=(config.beta1, 0.999))
elif config.adadelta:
optimizer = optim.Adadelta(crnn.parameters(), lr=config.lr)
else:
optimizer = optim.RMSprop(crnn.parameters(), lr=config.lr)
def val(net, dataset, criterion, max_iter=100, text_screen=False):
print('Start val')
for p in net.parameters():
p.requires_grad = False
if text_screen:
num_correct, num_all = val_model_text_screen(config.val_infofile,net,True,log_file='compare-'+config.saved_model_prefix+'.log')
else:
num_correct, num_all = val_model(config.val_infofile,net,True,log_file='compare-'+config.saved_model_prefix+'.log')
accuracy = num_correct / num_all
print('ocr_acc: %f' % (accuracy))
if config.use_log:
with open(log_filename, 'a') as f:
f.write('ocr_acc:{}\n'.format(accuracy))
global best_acc
if accuracy > best_acc:
print("new best acc: ", accuracy )
best_acc = accuracy
torch.save(crnn.state_dict(), '{}/{}_{}_{}.pth'.format(config.saved_model_dir, config.saved_model_prefix, epoch,
int(best_acc * 1000)))
torch.save(crnn.state_dict(), '{}/{}.pth'.format(config.saved_model_dir, config.saved_model_prefix))
def trainBatch(net, criterion, optimizer):
data = next(train_iter)
cpu_images, cpu_texts = data
batch_size = cpu_images.size(0)
image = cpu_images.to(device)
text, length = converter.encode(cpu_texts)
# print("text:", text)
# print("length:", length)
# utils.loadData(text, t)
# utils.loadData(length, l)
preds = net(image) # seqLength x batchSize x alphabet_size
preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size)) # seqLength x batchSize
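# Note: torch.nn.CTCLoss expects log-probabilities of shape (T, N, C), targets (padded
# or concatenated), per-sample input lengths and target lengths; with reduction='sum'
# the result is divided by batch_size below to get a per-sample average.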
cost = criterion(preds.log_softmax(2).cpu(), text, preds_size, length) / batch_size
if torch.isnan(cost):
print(batch_size,cpu_texts)
else:
net.zero_grad()
cost.backward()
optimizer.step()
return cost
for epoch in range(config.niter):
loss_avg.reset()
print('epoch {}....'.format(epoch))
train_iter = iter(train_loader)
i = 0
n_batch = len(train_loader)
# while i < len(train_loader):
for i in tqdm(train_loader, total=len(train_loader)):
for p in crnn.parameters():
p.requires_grad = True
crnn.train()
cost = trainBatch(crnn, criterion, optimizer)
# print('epoch: {} iter: {}/{} Train loss: {:.3f}'.format(epoch, i, n_batch, cost.item()))
loss_avg.add(cost)
# i += 1
print('Train loss: %f' % (loss_avg.val()))
if config.use_log:
with open(log_filename, 'a') as f:
f.write('{}\n'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')))
f.write('train_loss:{}\n'.format(loss_avg.val()))
val(crnn, test_dataset, criterion)
if epoch%50==0:
print("evaluation text sreen ...")
val(crnn, test_dataset, criterion,text_screen=True)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
src/config.py
|
import os, pathlib
TIMEOUT_BEFORE_UPSCALE = 900
BLACKOUT_OPACITY = 0.6
MOVE_SCALE = 3
ENCODING = 'utf-8'
TERMINATED_ERROR_CODES = [-15, 15]
defaultOpenDirectory = os.environ['HOME']
root = pathlib.Path(__file__).parent.parent.resolve()
tmp = f'{root}/tmp'
bin = f'{root}/bin'
realsr = f'{bin}/realsr-ncnn-vulkan'
convert = f'{bin}/convert'
composite = f'{bin}/composite'
modelJpeg = f'{bin}/realsr-ncnn-vulkan-models/models-DF2K_JPEG'
model = f'{bin}/realsr-ncnn-vulkan-models/models-DF2K'
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
driver.go
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sqlite
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"io"
"log"
"os"
"reflect"
"time"
)
func init() {
sql.Register("sqlite3", &impl{open: defaultOpen})
if os.Getenv("SQLITE_LOG") != "" {
ConfigLog(func(d interface{}, err error, msg string) {
log.Printf("%s: %s, %s\n", d, err, msg)
}, "SQLITE")
}
ConfigMemStatus(false)
}
// impl is an adapter to database/sql/driver
type impl struct {
open func(name string) (*Conn, error)
configure func(*Conn) error
}
type conn struct {
c *Conn
}
type stmt struct {
s *Stmt
rowsRef bool // true if there is a rowsImpl associated to this statement that has not been closed.
pendingClose bool
}
type rowsImpl struct {
s *stmt
columnNames []string // cache
ctx context.Context
}
type result struct {
id int64
rows int64
}
func (r *result) LastInsertId() (int64, error) {
return r.id, nil
}
func (r *result) RowsAffected() (int64, error) {
return r.rows, nil
}
// NewDriver creates a new driver with specialized connection creation/configuration.
// NewDriver(customOpen, nil) // no post-creation hook
// NewDriver(nil, customConfigure) // default connection creation but specific configuration step
func NewDriver(open func(name string) (*Conn, error), configure func(*Conn) error) driver.Driver {
if open == nil {
open = defaultOpen
}
return &impl{open: open, configure: configure}
}
var defaultOpen = func(name string) (*Conn, error) {
// OpenNoMutex == multi-thread mode (http://sqlite.org/compile.html#threadsafe and http://sqlite.org/threadsafe.html)
c, err := Open(name, OpenURI, OpenNoMutex, OpenReadWrite, OpenCreate)
if err != nil {
return nil, err
}
c.BusyTimeout(10 * time.Second)
//c.DefaultTimeLayout = "2006-01-02 15:04:05.999999999"
c.ScanNumericalAsTime = true
return c, nil
}
// Open opens a new database connection.
// ":memory:" for memory db,
// "" for temp file db
func (d *impl) Open(name string) (driver.Conn, error) {
c, err := d.open(name)
if err != nil {
return nil, err
}
if d.configure != nil {
if err = d.configure(c); err != nil {
_ = c.Close()
return nil, err
}
}
return &conn{c}, nil
}
// Unwrap gives access to underlying driver connection.
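// A minimal usage sketch (illustrative only; error handling elided):
//
//	db, _ := sql.Open("sqlite3", ":memory:")
//	c := Unwrap(db) // *Conn exposes SQLite-specific APIs not covered by database/sql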
func Unwrap(db *sql.DB) *Conn {
_, err := db.Exec("unwrap")
if cerr, ok := err.(ConnError); ok {
return cerr.c
}
return nil
}
func (c *conn) Ping(ctx context.Context) error {
if c.c.IsClosed() {
return driver.ErrBadConn
}
_, err := c.ExecContext(ctx, "PRAGMA schema_version", []driver.NamedValue{})
return err
}
// PRAGMA schema_version may be used to detect when the database schema is altered
func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) {
panic("ExecContext was not called.")
}
func (c *conn) Prepare(query string) (driver.Stmt, error) {
panic("use PrepareContext")
}
func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
if c.c.IsClosed() {
return nil, driver.ErrBadConn
}
s, err := c.c.Prepare(query)
if err != nil {
return nil, err
}
return &stmt{s: s}, nil
}
func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
if c.c.IsClosed() {
return nil, driver.ErrBadConn
}
if ctx.Done() != nil {
c.c.ProgressHandler(progressHandler, 100, ctx)
defer c.c.ProgressHandler(nil, 0, nil)
}
if len(args) == 0 {
if query == "unwrap" {
return nil, ConnError{c: c.c}
}
if err := c.c.FastExec(query); err != nil {
return nil, ctxError(ctx, err)
}
return c.c.result(), nil
}
for len(query) > 0 {
s, err := c.c.Prepare(query)
if err != nil {
return nil, ctxError(ctx, err)
} else if s.stmt == nil {
// this happens for a comment or white-space
query = s.tail
continue
}
var subargs []driver.NamedValue
count := s.BindParameterCount()
if len(s.tail) > 0 && len(args) >= count {
subargs = args[:count]
args = args[count:]
} else {
subargs = args
}
if err = s.bindNamedValue(subargs); err != nil {
return nil, ctxError(ctx, err)
}
err = s.exec()
if err != nil {
s.finalize()
return nil, ctxError(ctx, err)
}
if err = s.finalize(); err != nil {
return nil, ctxError(ctx, err)
}
query = s.tail
}
return c.c.result(), nil
}
func (c *conn) Close() error {
return c.c.Close()
}
func (c *conn) Begin() (driver.Tx, error) {
if c.c.IsClosed() {
return nil, driver.ErrBadConn
}
if err := c.c.Begin(); err != nil {
return nil, err
}
return c, nil
}
func (c *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
if c.c.IsClosed() {
return nil, driver.ErrBadConn
}
if !c.c.GetAutocommit() {
return nil, errors.New("Nested transactions are not supported")
}
if err := c.c.SetQueryOnly("", opts.ReadOnly); err != nil {
return nil, err
}
switch sql.IsolationLevel(opts.Isolation) {
case sql.LevelDefault, sql.LevelSerializable:
if err := c.c.FastExec("PRAGMA read_uncommitted=0"); err != nil {
return nil, err
}
case sql.LevelReadUncommitted:
if err := c.c.FastExec("PRAGMA read_uncommitted=1"); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("isolation level %d is not supported", opts.Isolation)
}
return c.Begin()
}
func (c *conn) Commit() error {
return c.c.Commit()
}
func (c *conn) Rollback() error {
return c.c.Rollback()
}
func (s *stmt) Close() error {
if s.rowsRef { // Currently, it never happens because the sql.Stmt doesn't call driver.Stmt in this case
s.pendingClose = true
return nil
}
return s.s.Finalize()
}
func (s *stmt) NumInput() int {
return s.s.BindParameterCount()
}
func (s *stmt) Exec(args []driver.Value) (driver.Result, error) {
panic("Using ExecContext")
}
func (s *stmt) Query(args []driver.Value) (driver.Rows, error) {
panic("Use QueryContext")
}
func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
if err := s.s.bindNamedValue(args); err != nil {
return nil, err
}
if ctx.Done() != nil {
s.s.c.ProgressHandler(progressHandler, 100, ctx)
defer s.s.c.ProgressHandler(nil, 0, nil)
}
if err := s.s.exec(); err != nil {
return nil, ctxError(ctx, err)
}
return s.s.c.result(), nil
}
func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
if s.rowsRef {
return nil, errors.New("previously returned Rows still not closed")
}
if err := s.s.bindNamedValue(args); err != nil {
return nil, err
}
s.rowsRef = true
if ctx.Done() != nil {
s.s.c.ProgressHandler(progressHandler, 100, ctx)
}
return &rowsImpl{s, nil, ctx}, nil
}
func (s *stmt) bind(args []driver.Value) error {
for i, v := range args {
if err := s.s.BindByIndex(i+1, v); err != nil {
return err
}
}
return nil
}
func (r *rowsImpl) Columns() []string {
if r.columnNames == nil {
r.columnNames = r.s.s.ColumnNames()
}
return r.columnNames
}
func (r *rowsImpl) Next(dest []driver.Value) error {
ok, err := r.s.s.Next()
if err != nil {
return ctxError(r.ctx, err)
}
if !ok {
return io.EOF
}
for i := range dest {
dest[i], _ = r.s.s.ScanValue(i, true)
/*if !driver.IsScanValue(dest[i]) {
panic("Invalid type returned by ScanValue")
}*/
}
return nil
}
func (r *rowsImpl) Close() error {
if r.ctx.Done() != nil {
r.s.s.c.ProgressHandler(nil, 0, nil)
}
r.s.rowsRef = false
if r.s.pendingClose {
return r.s.Close()
}
return r.s.s.Reset()
}
func (r *rowsImpl) HasNextResultSet() bool {
return len(r.s.s.tail) > 0
}
func (r *rowsImpl) NextResultSet() error {
currentStmt := r.s.s
nextQuery := currentStmt.tail
var nextStmt *Stmt
var err error
for len(nextQuery) > 0 {
nextStmt, err = currentStmt.c.Prepare(nextQuery)
if err != nil {
return err
} else if nextStmt.stmt == nil {
// this happens for a comment or white-space
nextQuery = nextStmt.tail
continue
}
break
}
if nextStmt == nil {
return io.EOF
}
// TODO close vs reset ?
err = currentStmt.Finalize()
if err != nil {
return err
}
r.s.s = nextStmt
return nil
}
func (r *rowsImpl) ColumnTypeScanType(index int) reflect.Type {
switch r.s.s.ColumnType(index) {
case Integer:
return reflect.TypeOf(int64(0))
case Float:
return reflect.TypeOf(float64(0))
case Text:
return reflect.TypeOf("")
case Null:
return reflect.TypeOf(nil)
case Blob:
fallthrough
default:
return reflect.TypeOf([]byte{})
}
}
func (r *rowsImpl) ColumnTypeDatabaseTypeName(index int) string {
return r.s.s.ColumnDeclaredType(index)
}
func (c *Conn) result() driver.Result {
// TODO How to know that the last Stmt has done an INSERT? An authorizer?
id := c.LastInsertRowid()
// TODO How to know that the last Stmt has done a DELETE/INSERT/UPDATE? An authorizer?
rows := int64(c.Changes())
return &result{id, rows} // FIXME RowAffected/noRows
}
func (s *Stmt) bindNamedValue(args []driver.NamedValue) error {
for _, v := range args {
if len(v.Name) == 0 {
if err := s.BindByIndex(v.Ordinal, v.Value); err != nil {
return err
}
} else {
index, err := s.BindParameterIndex(":" + v.Name) // TODO "$" and "@"
if err != nil {
return err
}
if err = s.BindByIndex(index, v.Value); err != nil {
return err
}
}
}
return nil
}
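// Note: progressHandler below is installed via Conn.ProgressHandler around context-aware
// calls; SQLite interrupts the running statement when the progress callback returns true,
// which is how ctx cancellation is propagated to long-running queries.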
func progressHandler(p interface{}) bool {
if ctx, ok := p.(context.Context); ok {
select {
case <-ctx.Done():
// Cancelled
return true
default:
return false
}
}
return false
}
func ctxError(ctx context.Context, err error) error {
ctxErr := ctx.Err()
if ctxErr != nil {
return ctxErr
}
return err
}
|
[
"\"SQLITE_LOG\""
] |
[] |
[
"SQLITE_LOG"
] |
[]
|
["SQLITE_LOG"]
|
go
| 1 | 0 | |
flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.yarn;
import org.apache.commons.io.FileUtils;
import org.apache.flink.client.CliFrontend;
import org.apache.flink.client.FlinkYarnSessionCli;
import org.apache.flink.test.util.TestBaseUtils;
import org.apache.flink.util.TestLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.concurrent.ConcurrentMap;
/**
* This base class allows to use the MiniYARNCluster.
* The cluster is re-used for all tests.
*
* This class is located in a different package which is built after flink-dist. This way,
* we can use the YARN uberjar of flink to start a Flink YARN session.
*
* The test is not thread-safe. Parallel execution of tests is not possible!
*/
public abstract class YarnTestBase extends TestLogger {
private static final Logger LOG = LoggerFactory.getLogger(YarnTestBase.class);
protected final static PrintStream originalStdout = System.out;
protected final static PrintStream originalStderr = System.err;
protected static String TEST_CLUSTER_NAME_KEY = "flink-yarn-minicluster-name";
protected final static int NUM_NODEMANAGERS = 2;
/** The tests are scanning for these strings in the final output. */
protected final static String[] PROHIBITED_STRINGS = {
"Exception", // we don't want any exceptions to happen
"Started [email protected]:8081" // Jetty should start on a random port in YARN mode.
};
/** These strings are whitelisted, overriding the prohibited strings. */
protected final static String[] WHITELISTED_STRINGS = {
"akka.remote.RemoteTransportExceptionNoStackTrace",
// workaround for annoying InterruptedException logging:
// https://issues.apache.org/jira/browse/YARN-1022
"java.lang.InterruptedException"
};
// Temp directory which is deleted after the unit test.
@ClassRule
public static TemporaryFolder tmp = new TemporaryFolder();
protected static MiniYARNCluster yarnCluster = null;
/**
* Uberjar (fat jar) file of Flink
*/
protected static File flinkUberjar;
protected static final Configuration yarnConfiguration;
/**
* lib/ folder of the flink distribution.
*/
protected static File flinkLibFolder;
static {
yarnConfiguration = new YarnConfiguration();
yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 4096); // 4096 is the available memory anyways
yarnConfiguration.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
yarnConfiguration.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
yarnConfiguration.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
yarnConfiguration.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2);
yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4);
yarnConfiguration.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600);
yarnConfiguration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
yarnConfiguration.setInt(YarnConfiguration.NM_VCORES, 666); // memory is overwritten in the MiniYARNCluster.
// so we have to change the number of cores for testing.
yarnConfiguration.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 20000); // 20 seconds expiry (to ensure we properly heartbeat with YARN).
}
/**
* Sleep a bit between the tests (we are re-using the YARN cluster for the tests)
*/
@After
public void sleep() {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
Assert.fail("Should not happen");
}
}
private YarnClient yarnClient = null;
@Before
public void checkClusterEmpty() throws IOException, YarnException {
if(yarnClient == null) {
yarnClient = YarnClient.createYarnClient();
yarnClient.init(yarnConfiguration);
yarnClient.start();
}
List<ApplicationReport> apps = yarnClient.getApplications();
for(ApplicationReport app : apps) {
if(app.getYarnApplicationState() != YarnApplicationState.FINISHED
&& app.getYarnApplicationState() != YarnApplicationState.KILLED
&& app.getYarnApplicationState() != YarnApplicationState.FAILED) {
Assert.fail("There is at least one application on the cluster is not finished." +
"App "+app.getApplicationId()+" is in state "+app.getYarnApplicationState());
}
}
}
/**
* Locate a file or directory
*/
public static File findFile(String startAt, FilenameFilter fnf) {
File root = new File(startAt);
String[] files = root.list();
if(files == null) {
return null;
}
for(String file : files) {
File f = new File(startAt + File.separator + file);
if(f.isDirectory()) {
File r = findFile(f.getAbsolutePath(), fnf);
if(r != null) {
return r;
}
} else if (fnf.accept(f.getParentFile(), f.getName())) {
return f;
}
}
return null;
}
/**
* Filter to find root dir of the flink-yarn dist.
*/
public static class RootDirFilenameFilter implements FilenameFilter {
@Override
public boolean accept(File dir, String name) {
return name.startsWith("flink-dist") && name.endsWith(".jar") && dir.toString().contains("/lib");
}
}
public static class ContainsName implements FilenameFilter {
private String[] names;
private String excludeInPath = null;
/**
* @param names which have to be included in the filename.
*/
public ContainsName(String[] names) {
this.names = names;
}
public ContainsName(String[] names, String excludeInPath) {
this.names = names;
this.excludeInPath = excludeInPath;
}
@Override
public boolean accept(File dir, String name) {
if(excludeInPath == null) {
for(String n: names) {
if(!name.contains(n)) {
return false;
}
}
return true;
} else {
for(String n: names) {
if(!name.contains(n)) {
return false;
}
}
return !dir.toString().contains(excludeInPath);
}
}
}
public static File writeYarnSiteConfigXML(Configuration yarnConf) throws IOException {
tmp.create();
File yarnSiteXML = new File(tmp.newFolder().getAbsolutePath() + "/yarn-site.xml");
FileWriter writer = new FileWriter(yarnSiteXML);
yarnConf.writeXml(writer);
writer.flush();
writer.close();
return yarnSiteXML;
}
/**
* This method checks the written TaskManager and JobManager log files
* for exceptions.
*
* WARN: Please make sure the tool doesn't find old logfiles from previous test runs.
* So always run "mvn clean" before running the tests here.
*
*/
public static void ensureNoProhibitedStringInLogFiles(final String[] prohibited, final String[] whitelisted) {
File cwd = new File("target/" + yarnConfiguration.get(TEST_CLUSTER_NAME_KEY));
Assert.assertTrue("Expecting directory " + cwd.getAbsolutePath() + " to exist", cwd.exists());
Assert.assertTrue("Expecting directory " + cwd.getAbsolutePath() + " to be a directory", cwd.isDirectory());
File foundFile = findFile(cwd.getAbsolutePath(), new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
// scan each file for prohibited strings.
File f = new File(dir.getAbsolutePath()+ "/" + name);
try {
Scanner scanner = new Scanner(f);
while (scanner.hasNextLine()) {
final String lineFromFile = scanner.nextLine();
for (String aProhibited : prohibited) {
if (lineFromFile.contains(aProhibited)) {
boolean whitelistedFound = false;
for (String white : whitelisted) {
if (lineFromFile.contains(white)) {
whitelistedFound = true;
break;
}
}
if (!whitelistedFound) {
// logging in FATAL to see the actual message in TRAVIS tests.
Marker fatal = MarkerFactory.getMarker("FATAL");
LOG.error(fatal, "Prohibited String '{}' in line '{}'", aProhibited, lineFromFile);
return true;
}
}
}
}
} catch (FileNotFoundException e) {
LOG.warn("Unable to locate file: "+e.getMessage()+" file: "+f.getAbsolutePath());
}
return false;
}
});
if(foundFile != null) {
Scanner scanner = null;
try {
scanner = new Scanner(foundFile);
} catch (FileNotFoundException e) {
Assert.fail("Unable to locate file: "+e.getMessage()+" file: "+foundFile.getAbsolutePath());
}
LOG.warn("Found a file with a prohibited string. Printing contents:");
while (scanner.hasNextLine()) {
LOG.warn("LINE: "+scanner.nextLine());
}
Assert.fail("Found a file "+foundFile+" with a prohibited string: "+Arrays.toString(prohibited));
}
}
public static void sleep(int time) {
try {
Thread.sleep(time);
} catch (InterruptedException e) {
LOG.warn("Interruped",e);
}
}
public static int getRunningContainers() {
int count = 0;
for(int nmId = 0; nmId < NUM_NODEMANAGERS; nmId++) {
NodeManager nm = yarnCluster.getNodeManager(nmId);
ConcurrentMap<ContainerId, Container> containers = nm.getNMContext().getContainers();
count += containers.size();
}
return count;
}
public static void startYARNWithConfig(Configuration conf) {
// Set the home directory to a tmp directory. Flink on YARN uses the home dir to distribute files.
File homeDir = null;
try {
homeDir = tmp.newFolder();
} catch (IOException e) {
e.printStackTrace();
Assert.fail(e.getMessage());
}
System.setProperty("user.home", homeDir.getAbsolutePath());
String uberjarStartLoc = "..";
LOG.info("Trying to locate uberjar in {}", new File(uberjarStartLoc));
flinkUberjar = findFile(uberjarStartLoc, new RootDirFilenameFilter());
Assert.assertNotNull("Flink uberjar not found", flinkUberjar);
String flinkDistRootDir = flinkUberjar.getParentFile().getParent();
flinkLibFolder = flinkUberjar.getParentFile(); // the uberjar is located in lib/
Assert.assertNotNull("Flink flinkLibFolder not found", flinkLibFolder);
Assert.assertTrue("lib folder not found", flinkLibFolder.exists());
Assert.assertTrue("lib folder not found", flinkLibFolder.isDirectory());
if (!flinkUberjar.exists()) {
Assert.fail("Unable to locate yarn-uberjar.jar");
}
try {
LOG.info("Starting up MiniYARNCluster");
if (yarnCluster == null) {
yarnCluster = new MiniYARNCluster(conf.get(YarnTestBase.TEST_CLUSTER_NAME_KEY), NUM_NODEMANAGERS, 1, 1);
yarnCluster.init(conf);
yarnCluster.start();
}
Map<String, String> map = new HashMap<String, String>(System.getenv());
File flinkConfDirPath = findFile(flinkDistRootDir, new ContainsName(new String[]{"flink-conf.yaml"}));
Assert.assertNotNull(flinkConfDirPath);
map.put("FLINK_CONF_DIR", flinkConfDirPath.getParent());
File yarnConfFile = writeYarnSiteConfigXML(conf);
map.put("YARN_CONF_DIR", yarnConfFile.getParentFile().getAbsolutePath());
map.put("IN_TESTS", "yes we are in tests"); // see FlinkYarnClient() for more infos
TestBaseUtils.setEnv(map);
Assert.assertTrue(yarnCluster.getServiceState() == Service.STATE.STARTED);
// wait for the nodeManagers to connect
while(!yarnCluster.waitForNodeManagersToConnect(500)) {
LOG.info("Waiting for Nodemanagers to connect");
}
} catch (Exception ex) {
ex.printStackTrace();
LOG.error("setup failure", ex);
Assert.fail();
}
}
/**
* Default @BeforeClass impl. Overwrite this for passing a different configuration
*/
@BeforeClass
public static void setup() {
startYARNWithConfig(yarnConfiguration);
}
// -------------------------- Runner -------------------------- //
protected static ByteArrayOutputStream outContent;
protected static ByteArrayOutputStream errContent;
enum RunTypes {
YARN_SESSION, CLI_FRONTEND
}
/**
* This method returns once the "startedAfterString" has been seen.
*/
protected Runner startWithArgs(String[] args, String startedAfterString, RunTypes type) {
LOG.info("Running with args {}", Arrays.toString(args));
outContent = new ByteArrayOutputStream();
errContent = new ByteArrayOutputStream();
System.setOut(new PrintStream(outContent));
System.setErr(new PrintStream(errContent));
final int START_TIMEOUT_SECONDS = 60;
Runner runner = new Runner(args, type);
runner.setName("Frontend (CLI/YARN Client) runner thread (startWithArgs()).");
runner.start();
for(int second = 0; second < START_TIMEOUT_SECONDS; second++) {
sleep(1000);
// check output for correct TaskManager startup.
if(outContent.toString().contains(startedAfterString)
|| errContent.toString().contains(startedAfterString) ) {
LOG.info("Found expected output in redirected streams");
return runner;
}
// check if thread died
if(!runner.isAlive()) {
sendOutput();
Assert.fail("Runner thread died before the test was finished. Return value = "+runner.getReturnValue());
}
}
sendOutput();
Assert.fail("During the timeout period of " + START_TIMEOUT_SECONDS + " seconds the " +
"expected string did not show up");
return null;
}
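// Illustrative (hypothetical) usage from a concrete test; the arguments and the expected output
// line are placeholders rather than values taken from this file:
//
//   Runner runner = startWithArgs(
//           new String[]{"-j", flinkUberjar.getAbsolutePath(), "-n", "1"},
//           "JobManager is now running", RunTypes.YARN_SESSION);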
protected void runWithArgs(String[] args, String terminateAfterString, String[] failOnStrings, RunTypes type, int returnCode) {
runWithArgs(args,terminateAfterString, failOnStrings, type, returnCode, false);
}
/**
* The test has been passed once the "terminateAfterString" has been seen.
* @param args Command line arguments for the runner
* @param terminateAfterString The runner searches stdout and stderr for this string; as soon as it appears, the test has passed.
* @param failOnStrings The runner also searches stdout and stderr for these strings; if one appears, the test has failed.
* @param type Set the type of the runner
* @param returnCode Expected return code from the runner.
* @param checkLogForTerminateString If true, the runner checks also the log4j logger for the terminate string
*/
protected void runWithArgs(String[] args, String terminateAfterString, String[] failOnStrings, RunTypes type, int returnCode, boolean checkLogForTerminateString) {
LOG.info("Running with args {}", Arrays.toString(args));
outContent = new ByteArrayOutputStream();
errContent = new ByteArrayOutputStream();
System.setOut(new PrintStream(outContent));
System.setErr(new PrintStream(errContent));
// we wait for at most three minutes
final int START_TIMEOUT_SECONDS = 180;
final long deadline = System.currentTimeMillis() + (START_TIMEOUT_SECONDS * 1000);
Runner runner = new Runner(args, type);
runner.start();
boolean expectedStringSeen = false;
boolean testPassedFromLog4j = false;
do {
sleep(1000);
String outContentString = outContent.toString();
String errContentString = errContent.toString();
if(failOnStrings != null) {
for (String failOnString : failOnStrings) {
if (outContentString.contains(failOnString)
|| errContentString.contains(failOnString)) {
LOG.warn("Failing test. Output contained illegal string '" + failOnString + "'");
sendOutput();
// stopping runner.
runner.sendStop();
Assert.fail("Output contained illegal string '" + failOnString + "'");
}
}
}
// check output for the expected terminateAfterString.
if(checkLogForTerminateString) {
LoggingEvent matchedEvent = UtilsTest.getEventContainingString(terminateAfterString);
if(matchedEvent != null) {
testPassedFromLog4j = true;
LOG.info("Found expected output in logging event {}", matchedEvent);
}
}
if (outContentString.contains(terminateAfterString) || errContentString.contains(terminateAfterString) || testPassedFromLog4j ) {
expectedStringSeen = true;
LOG.info("Found expected output in redirected streams");
// send "stop" command to command line interface
LOG.info("RunWithArgs: request runner to stop");
runner.sendStop();
// wait for the thread to stop
try {
runner.join(30000);
}
catch (InterruptedException e) {
LOG.warn("Interrupted while stopping runner", e);
}
LOG.warn("RunWithArgs runner stopped.");
}
else {
// check if thread died
if (!runner.isAlive()) {
if (runner.getReturnValue() != 0) {
Assert.fail("Runner thread died before the test was finished. Return value = "
+ runner.getReturnValue());
} else {
LOG.info("Runner stopped earlier than expected with return value = 0");
}
// leave loop: the runner died, so we can not expect new strings to show up.
break;
}
}
}
while (!expectedStringSeen && System.currentTimeMillis() < deadline);
sendOutput();
Assert.assertTrue("During the timeout period of " + START_TIMEOUT_SECONDS + " seconds the " +
"expected string did not show up", expectedStringSeen);
// check for 0 return code
Assert.assertEquals("Expected return value", returnCode, runner.getReturnValue());
LOG.info("Test was successful");
}
protected static void sendOutput() {
System.setOut(originalStdout);
System.setErr(originalStderr);
LOG.info("Sending stdout content through logger: \n\n{}\n\n", outContent.toString());
LOG.info("Sending stderr content through logger: \n\n{}\n\n", errContent.toString());
}
public static class Runner extends Thread {
private final String[] args;
private int returnValue;
private RunTypes type;
private FlinkYarnSessionCli yCli;
public Runner(String[] args, RunTypes type) {
this.args = args;
this.type = type;
}
public int getReturnValue() {
return returnValue;
}
@Override
public void run() {
switch(type) {
case YARN_SESSION:
yCli = new FlinkYarnSessionCli("", "", false);
returnValue = yCli.run(args);
break;
case CLI_FRONTEND:
try {
CliFrontend cli = new CliFrontend();
returnValue = cli.parseParameters(args);
} catch (Exception e) {
throw new RuntimeException(e);
}
break;
default:
throw new RuntimeException("Unknown type " + type);
}
if(returnValue != 0) {
Assert.fail("The YARN session returned with non-null value="+returnValue);
}
}
public void sendStop() {
if(yCli != null) {
yCli.stop();
}
}
}
// -------------------------- Tear down -------------------------- //
@AfterClass
public static void copyOnTravis() {
// When we are on travis, we copy the tmp files of JUnit (containing the MiniYARNCluster log files)
// to <flinkRoot>/target/flink-yarn-tests-*.
// The files from there are picked up by the ./tools/travis_watchdog.sh script
// to upload them to Amazon S3.
if(isOnTravis()) {
File target = new File("../target/" + yarnConfiguration.get(TEST_CLUSTER_NAME_KEY));
if(!target.mkdirs()) {
LOG.warn("Error creating dirs to {}", target);
}
File src = tmp.getRoot();
LOG.info("copying the final files from {} to {}", src.getAbsolutePath(), target.getAbsolutePath());
try {
FileUtils.copyDirectoryToDirectory(src, target);
} catch (IOException e) {
LOG.warn("Error copying the final files from {} to {}: msg: {}", src.getAbsolutePath(), target.getAbsolutePath(), e.getMessage(), e);
}
}
}
public static boolean isOnTravis() {
return System.getenv("TRAVIS") != null && System.getenv("TRAVIS").equals("true");
}
}
|
[
"\"TRAVIS\"",
"\"TRAVIS\""
] |
[] |
[
"TRAVIS"
] |
[]
|
["TRAVIS"]
|
java
| 1 | 0 | |
bndDatePtr.go
|
// Copyright 2014 Rana Ian. All rights reserved.
// Use of this source code is governed by The MIT License
// found in the accompanying LICENSE file.
package ora
/*
#include <oci.h>
#include <stdlib.h>
#include "version.h"
*/
import "C"
import (
"time"
"unsafe"
"github.com/sergeantwolf/ora/date"
)
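// bndDatePtr binds a Go *Date to a statement placeholder as an Oracle DATE (SQLT_DAT, the
// 7-byte internal format), tracking NULL values through the indicator held in nullp.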
type bndDatePtr struct {
stmt *Stmt
ocibnd *C.OCIBind
value *Date
ocidate [1]date.Date
nullp
}
func (bnd *bndDatePtr) bind(value *Date, position namedPos, stmt *Stmt) error {
bnd.stmt = stmt
bnd.value = value
bnd.nullp.Set(value == nil || value.IsNull())
if value != nil {
bnd.ocidate[0] = value.Date
}
//bnd.stmt.logF(_drv.Cfg().Log.Stmt.Bind, "bind val=%#v null?=%t datep=%#v (%v)\n", bnd.value, bnd.nullp.IsNull(), bnd.datep, bnd.datep.Get())
ph, phLen, phFree := position.CString()
if ph != nil {
defer phFree()
}
r := C.bindByNameOrPos(
bnd.stmt.ocistmt, //OCIStmt *stmtp,
&bnd.ocibnd, //OCIBind **bindpp,
bnd.stmt.ses.srv.env.ocierr, //OCIError *errhp,
C.ub4(position.Ordinal), //ub4 position,
ph,
phLen,
unsafe.Pointer(&bnd.ocidate[0]), //void *valuep,
C.LENGTH_TYPE(7), //sb8 value_sz,
C.SQLT_DAT, //ub2 dty,
unsafe.Pointer(bnd.nullp.Pointer()), //void *indp,
nil, //ub2 *alenp,
nil, //ub2 *rcodep,
0, //ub4 maxarr_len,
nil, //ub4 *curelep,
C.OCI_DEFAULT) //ub4 mode );
if r == C.OCI_ERROR {
return bnd.stmt.ses.srv.env.ociError()
}
return nil
}
func (bnd *bndDatePtr) setPtr() (err error) {
bnd.stmt.logF(_drv.Cfg().Log.Stmt.Bind, "setPtr val=%#v nullp=%#v datep=%#v (%v)\n", bnd.value, bnd.nullp, bnd.ocidate[0], bnd.ocidate[0].Get())
if bnd.value == nil {
return nil
}
if bnd.nullp.IsNull() {
bnd.value.Set(time.Time{})
return nil
}
bnd.value.Date = bnd.ocidate[0]
return nil
}
func (bnd *bndDatePtr) close() (err error) {
defer func() {
if value := recover(); value != nil {
err = errR(value)
}
}()
stmt := bnd.stmt
bnd.stmt = nil
bnd.ocibnd = nil
bnd.value = nil
bnd.nullp.Free()
stmt.putBnd(bndIdxDatePtr, bnd)
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
utils.go
|
//
// Copyright © 2011-2018 Guy M. Allard
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package stompngo
import (
"fmt"
"io"
"strings"
)
/*
Encode a string per STOMP 1.1+ specifications.
*/
func encode(s string) string {
r := s
for _, tr := range codecValues {
if strings.Contains(r, tr.decoded) {
r = strings.Replace(r, tr.decoded, tr.encoded, -1)
}
}
return r
}
/*
Decode a string per STOMP 1.1+ specifications.
*/
func decode(s string) string {
r := s
for _, tr := range codecValues {
if strings.Contains(r, tr.encoded) {
r = strings.Replace(r, tr.encoded, tr.decoded, -1)
}
}
return r
}
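// Illustrative example (assuming codecValues contains the STOMP 1.1 escape pairs such as
// ":" <-> "\c" and newline <-> "\n"):
//
//	encode("destination:/queue/a") // -> "destination\c/queue/a"
//	decode("destination\c/queue/a") // -> "destination:/queue/a"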
/*
A network helper. Read from the wire until a 0x00 byte is encountered.
*/
func readUntilNul(c *Connection) ([]uint8, error) {
c.setReadDeadline()
b, e := c.rdr.ReadBytes(0)
if c.checkReadError(e) != nil {
return b, e
}
if len(b) == 1 {
b = NULLBUFF
} else {
b = b[0 : len(b)-1]
}
return b, e
}
/*
A network helper. Read a full message body with a known length that is
> 0. Then read the trailing 'null' byte expected for STOMP frames.
*/
func readBody(c *Connection, l int) ([]uint8, error) {
b := make([]byte, l)
c.setReadDeadline()
n, e := io.ReadFull(c.rdr, b)
if n < l && n != 0 { // Short read, e is ErrUnexpectedEOF
c.log("SHORT READ", n, l, e)
return b[0 : n-1], e
}
if c.checkReadError(e) != nil { // Other errors
return b, e
}
c.setReadDeadline()
_, e = c.rdr.ReadByte() // read the trailing NUL that terminates the frame
if c.checkReadError(e) != nil { // Other errors
return b, e
}
return b, e
}
/*
Common Header Validation.
*/
func checkHeaders(h Headers, p string) error {
if h == nil {
return EHDRNIL
}
// Length check
if e := h.Validate(); e != nil {
return e
}
// Empty key / value check
for i := 0; i < len(h); i += 2 {
if h[i] == "" {
return EHDRMTK
}
if p == SPL_10 && h[i+1] == "" {
return EHDRMTV
}
}
// UTF8 check
if p != SPL_10 {
_, e := h.ValidateUTF8()
if e != nil {
return e
}
}
return nil
}
/*
Internal function used by heartbeat initialization.
*/
func max(a, b int64) int64 {
if a > b {
return a
}
return b
}
/*
Debug helper. Dump a MessageData value: command, headers, body, and any error.
*/
func dumpmd(md MessageData) {
fmt.Printf("Command: %s\n", md.Message.Command)
fmt.Println("Headers:")
for i := 0; i < len(md.Message.Headers); i += 2 {
fmt.Printf("key:%s\t\tvalue:%s\n",
md.Message.Headers[i], md.Message.Headers[i+1])
}
fmt.Printf("Body: %s\n", string(md.Message.Body))
if md.Error != nil {
fmt.Printf("Error: %s\n", md.Error.Error())
} else {
fmt.Println("Error: nil")
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
Experimental/Snake Game.py
|
import random
import turtle
import time
class Square:
def __init__(self, x, y):
self.x = x
self.y = y
def drawself(self, turtle):
# draw a black box at its coordinates, leaving a small gap between cubes
turtle.goto(self.x - 9, self.y - 9)
turtle.begin_fill()
for i in range(4):
turtle.forward(18)
turtle.left(90)
turtle.end_fill()
class Food:
def __init__(self, x, y):
self.x = x
self.y = y
self.state = "ON"
def changelocation(self):
# I haven't programmed it to spawn outside the snake's body yet
self.x = random.randint(0, 20)*20 - 200
self.y = random.randint(0, 20)*20 - 200
def drawself(self, turtle):
# similar to the Square drawself, but blinks on and off
if self.state == "ON":
turtle.goto(self.x - 9, self.y - 9)
turtle.begin_fill()
for i in range(4):
turtle.forward(18)
turtle.left(90)
turtle.end_fill()
def changestate(self):
# controls the blinking
self.state = "OFF" if self.state == "ON" else "ON"
class Snake:
def __init__(self):
self.headposition = [20, 0] # keeps track of where it needs to go next
self.body = [Square(-20, 0), Square(0, 0), Square(20, 0)] # body is a list of squares
self.nextX = 1 # tells the snake which way it's going next
self.nextY = 0
self.crashed = False # I'll use this when I get around to collision detection
self.nextposition = [self.headposition[0] + 20*self.nextX,
self.headposition[1] + 20*self.nextY]
# prepares the next location to add to the snake
def moveOneStep(self):
if Square(self.nextposition[0], self.nextposition[1]) not in self.body:
# attempt (unsuccessful) at collision detection
self.body.append(Square(self.nextposition[0], self.nextposition[1]))
# moves the snake head to the next spot, deleting the tail
del self.body[0]
self.headposition[0], self.headposition[1] = self.body[-1].x, self.body[-1].y
# resets the head and nextposition
self.nextposition = [self.headposition[0] + 20*self.nextX,
self.headposition[1] + 20*self.nextY]
else:
self.crashed = True # more unsuccessful collision detection
def moveup(self): # pretty obvious what these do
self.nextX = 0
self.nextY = 1
def moveleft(self):
self.nextX = -1
self.nextY = 0
def moveright(self):
self.nextX = 1
self.nextY = 0
def movedown(self):
self.nextX = 0
self.nextY = -1
def eatFood(self):
# adds the next spot without deleting the tail, extending the snake by 1
self.body.append(Square(self.nextposition[0], self.nextposition[1]))
self.headposition[0], self.headposition[1] = self.body[-1].x, self.body[-1].y
self.nextposition = [self.headposition[0] + 20*self.nextX,
self.headposition[1] + 20*self.nextY]
def drawself(self, turtle): # draws the whole snake when called
for segment in self.body:
segment.drawself(turtle)
class Game:
def __init__(self):
# game object has a screen, a turtle, a basic snake and a food
self.screen = turtle.Screen()
self.artist = turtle.Turtle()
self.artist.up()
self.artist.hideturtle()
self.snake = Snake()
self.food = Food(100, 0)
self.counter = 0 # this will be used later
self.commandpending = False # as will this
def nextFrame(self):
while True: # now here's where it gets fiddly...
game.screen.listen()
game.screen.onkey(game.snakedown, "Down")
game.screen.onkey(game.snakeup, "Up")
game.screen.onkey(game.snakeleft, "Left")
game.screen.onkey(game.snakeright, "Right")
turtle.tracer(0) # follow it so far?
self.artist.clear()
if self.counter == 5:
# only moves to the next frame every 5 loops; this was an attempt to get rid of the turning delay
if (self.snake.nextposition[0], self.snake.nextposition[1]) == (self.food.x, self.food.y):
self.snake.eatFood()
self.food.changelocation()
else:
self.snake.moveOneStep()
self.counter = 0
else:
self.counter += 1
self.food.changestate() # makes the food flash
self.food.drawself(self.artist) # show the food and snake
self.snake.drawself(self.artist)
turtle.update()
self.commandpending = False
time.sleep(0.05)
def snakeup(self):
print("going up") # put this in for debugging purposes
if not self.commandpending:
# should allow only one turn each frame; I don't think it's working
self.snake.moveup()
self.commandpending = True
def snakedown(self):
print("going down")
if not self.commandpending:
self.snake.movedown()
self.commandpending = True
def snakeleft(self):
print("going left")
if not self.commandpending:
self.snake.moveleft()
self.commandpending = True
def snakeright(self):
print("going right")
if not self.commandpending:
self.snake.moveright()
self.commandpending = True
game = Game()
game.nextFrame()
print("game over!")
game.screen.mainloop()
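# Note (hypothetical, not part of the original game): the collision check in Snake.moveOneStep
# relies on `Square(...) not in self.body`, but Square defines no __eq__, so membership falls
# back to identity and never matches. A minimal sketch of the two methods one could add to
# Square to make that check work:
#
#     def __eq__(self, other):
#         return isinstance(other, Square) and (self.x, self.y) == (other.x, other.y)
#
#     def __hash__(self):
#         return hash((self.x, self.y))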
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
commands.go
|
package docker
import (
"archive/tar"
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/engine"
flag "github.com/dotcloud/docker/pkg/mflag"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"path"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
)
var (
GITCOMMIT string
VERSION string
)
var (
ErrConnectionRefused = errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
)
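// getMethod maps a CLI command name onto its Cmd* receiver method via reflection,
// e.g. "ps" resolves to CmdPs and "rmi" to CmdRmi (illustrative).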
func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
return nil, false
}
return method.Interface().(func(...string) error), true
}
func ParseCommands(proto, addr string, args ...string) error {
cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return cli.CmdHelp(args[1:]...)
}
return method(args[1:]...)
}
return cli.CmdHelp(args...)
}
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
} else {
method("--help")
return nil
}
}
help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"cp", "Copy files/folders from the containers filesystem to the host path"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"export", "Stream the contents of a container as a tar archive"},
{"history", "Show the history of an image"},
{"images", "List images"},
{"import", "Create a new filesystem image from the contents of a tarball"},
{"info", "Display system-wide information"},
{"insert", "Insert a file in an image"},
{"inspect", "Return low-level information on a container"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"save", "Save an image to a tar archive"},
{"search", "Search for an image in the docker index"},
{"start", "Start a stopped container"},
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"version", "Show the docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
fmt.Fprintf(cli.err, "%s\n", help)
return nil
}
func (cli *DockerCli) CmdInsert(args ...string) error {
cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 3 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("url", cmd.Arg(1))
v.Set("path", cmd.Arg(2))
return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
}
// MkBuildContext returns an archive of an empty context with the contents
// of `dockerfile` at the path ./Dockerfile
func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
files = append(files, [2]string{"Dockerfile", dockerfile})
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
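// Illustrative (hypothetical) usage: build an in-memory context from a Dockerfile string,
// optionally bundling extra files as {name, content} pairs:
//
//	ctx, err := MkBuildContext("FROM busybox\nCMD [\"true\"]", [][2]string{{"hello.txt", "hi"}})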
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress verbose build output")
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, false, "Remove intermediate containers after a successful build")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var (
context archive.Archive
isRemote bool
err error
)
if cmd.Arg(0) == "-" {
// As a special case, 'docker build -' will build from an empty context with the
// contents of stdin as a Dockerfile
dockerfile, err := ioutil.ReadAll(cli.in)
if err != nil {
return err
}
context, err = MkBuildContext(string(dockerfile), nil)
} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
isRemote = true
} else {
if _, err := os.Stat(cmd.Arg(0)); err != nil {
return err
}
filename := path.Join(cmd.Arg(0), "Dockerfile")
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context")
}
// Upload the build context
v := &url.Values{}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
if *noCache {
v.Set("nocache", "1")
}
if *rm {
v.Set("rm", "1")
}
cli.LoadConfigFile()
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(cli.configFile)
if err != nil {
return err
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
if context != nil {
headers.Set("Content-Type", "application/tar")
}
err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
if jerr, ok := err.(*utils.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// 'docker login': login / register a user to registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
var username, password, email string
cmd.StringVar(&username, []string{"u", "-username"}, "", "username")
cmd.StringVar(&password, []string{"p", "-password"}, "", "password")
cmd.StringVar(&email, []string{"e", "-email"}, "", "email")
err := cmd.Parse(args)
if err != nil {
return nil
}
serverAddress := auth.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress, err = registry.ExpandAndVerifyRegistryUrl(cmd.Arg(0))
if err != nil {
return err
}
fmt.Fprintf(cli.out, "Login against server at %s\n", serverAddress)
}
promptDefault := func(prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(cli.out, "%s: ", prompt)
} else {
fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
}
}
readInput := func(in io.Reader, out io.Writer) string {
reader := bufio.NewReader(in)
line, _, err := reader.ReadLine()
if err != nil {
fmt.Fprintln(out, err.Error())
os.Exit(1)
}
return string(line)
}
cli.LoadConfigFile()
authconfig, ok := cli.configFile.Configs[serverAddress]
if !ok {
authconfig = auth.AuthConfig{}
}
if username == "" {
promptDefault("Username", authconfig.Username)
username = readInput(cli.in, cli.out)
if username == "" {
username = authconfig.Username
}
}
if username != authconfig.Username {
if password == "" {
oldState, _ := term.SaveState(cli.terminalFd)
fmt.Fprintf(cli.out, "Password: ")
term.DisableEcho(cli.terminalFd, oldState)
password = readInput(cli.in, cli.out)
fmt.Fprint(cli.out, "\n")
term.RestoreTerminal(cli.terminalFd, oldState)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
}
if email == "" {
promptDefault("Email", authconfig.Email)
email = readInput(cli.in, cli.out)
if email == "" {
email = authconfig.Email
}
}
} else {
password = authconfig.Password
email = authconfig.Email
}
authconfig.Username = username
authconfig.Password = password
authconfig.Email = email
authconfig.ServerAddress = serverAddress
cli.configFile.Configs[serverAddress] = authconfig
body, statusCode, err := readBody(cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false))
if statusCode == 401 {
delete(cli.configFile.Configs, serverAddress)
auth.SaveConfig(cli.configFile)
return err
}
if err != nil {
return err
}
var out2 engine.Env
err = json.Unmarshal(body, &out2)
if err != nil {
cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
return err
}
auth.SaveConfig(cli.configFile)
if out2.Get("Status") != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
}
return nil
}
// 'docker wait': block until a container stops
func (cli *DockerCli) CmdWait(args ...string) error {
cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
status, err := waitForExit(cli, name)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
} else {
fmt.Fprintf(cli.out, "%d\n", status)
}
}
return encounteredError
}
// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
cmd := cli.Subcmd("version", "", "Show the docker version information.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
if VERSION != "" {
fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
}
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
if GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT)
}
body, _, err := readBody(cli.call("GET", "/version", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
release := utils.GetReleaseVersion()
if release != "" {
fmt.Fprintf(cli.out, "Last stable version: %s", release)
if (VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) {
fmt.Fprintf(cli.out, ", please update docker")
}
fmt.Fprintf(cli.out, "\n")
}
return nil
}
// 'docker info': display system-wide information.
func (cli *DockerCli) CmdInfo(args ...string) error {
cmd := cli.Subcmd("info", "", "Display system-wide information")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/info", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteInfo, err := out.AddEnv()
if err != nil {
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote info: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
}
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
cli.LoadConfigFile()
u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
}
}
if !remoteInfo.GetBool("MemoryLimit") {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
}
if !remoteInfo.GetBool("SwapLimit") {
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
}
if !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
}
return nil
}
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. Once killed it will then be restarted. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
sigc := make(chan os.Signal, 1)
utils.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil {
utils.Debugf("Error sending signal: %s", err)
}
}
}()
return sigc
}
func (cli *DockerCli) CmdStart(args ...string) error {
cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var cErr chan error
var tty bool
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("Impossible to start and attach multiple containers at once.")
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
tty = container.Config.Tty
if !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if *openStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
cErr = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil)
})
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
if err != nil {
if !*attach || !*openStdin {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to start one or more containers")
}
} else {
if !*attach || !*openStdin {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
}
if encounteredError != nil {
if *openStdin || *attach {
cli.in.Close()
<-cErr
}
return encounteredError
}
if *openStdin || *attach {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
return <-cErr
}
return nil
}
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var tmpl *template.Template
if *tmplStr != "" {
var err error
if tmpl, err = template.New("").Parse(*tmplStr); err != nil {
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
return &utils.StatusError{StatusCode: 64,
Status: "Template parsing error: " + err.Error()}
}
}
indented := new(bytes.Buffer)
indented.WriteByte('[')
status := 0
for _, name := range cmd.Args() {
obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false))
if err != nil {
if strings.Contains(err.Error(), "No such") {
fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
} else {
fmt.Fprintf(cli.err, "%s", err)
}
status = 1
continue
}
}
if tmpl == nil {
if err = json.Indent(indented, obj, "", " "); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
} else {
// Has template, will render
var value interface{}
if err := json.Unmarshal(obj, &value); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
if err := tmpl.Execute(cli.out, value); err != nil {
return err
}
cli.out.Write([]byte{'\n'})
}
indented.WriteString(",")
}
if indented.Len() > 1 {
// Remove trailing ','
indented.Truncate(indented.Len() - 1)
}
indented.WriteByte(']')
if tmpl == nil {
if _, err := io.Copy(cli.out, indented); err != nil {
return err
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() == 0 {
cmd.Usage()
return nil
}
val := url.Values{}
if cmd.NArg() > 1 {
val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false))
if err != nil {
return err
}
procs := APITop{}
err = json.Unmarshal(body, &procs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, strings.Join(procs.Titles, "\t"))
for _, proc := range procs.Processes {
fmt.Fprintln(w, strings.Join(proc, "\t"))
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
port := cmd.Arg(1)
proto := "tcp"
parts := strings.SplitN(port, "/", 2)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
}
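// e.g. (illustrative): "8080/udp" yields port "8080" and proto "udp",
// while a bare "8080" keeps the default proto "tcp".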
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
if err != nil {
return err
}
var out Container
err = json.Unmarshal(body, &out)
if err != nil {
return err
}
if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
} else {
return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
}
return nil
}
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
cmd := cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
body, _, err := readBody(cli.call("DELETE", "/images/"+name, nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
} else {
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
continue
}
for _, out := range outs.Data {
if out.Get("Deleted") != "" {
fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted"))
} else {
fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged"))
}
}
}
}
return encounteredError
}
func (cli *DockerCli) CmdHistory(args ...string) error {
cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
}
for _, out := range outs.Data {
outID := out.Get("ID")
if !*quiet {
if *noTrunc {
fmt.Fprintf(w, "%s\t", outID)
} else {
fmt.Fprintf(w, "%s\t", utils.TruncateID(outID))
}
fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
} else {
fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
}
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size")))
} else {
if *noTrunc {
fmt.Fprintln(w, outID)
} else {
fmt.Fprintln(w, utils.TruncateID(outID))
}
}
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container")
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
val := url.Values{}
if *v {
val.Set("v", "1")
}
if *link {
val.Set("link", "1")
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
// 'docker kill NAME' kills a running container
func (cli *DockerCli) CmdKill(args ...string) error {
cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)")
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdImport(args ...string) error {
cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var src, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
src = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("fromSrc", src)
var in io.Reader
if src == "-" {
in = cli.in
}
return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
name := cmd.Arg(0)
if name == "" {
cmd.Usage()
return nil
}
cli.LoadConfigFile()
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(name)
if err != nil {
return err
}
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
// If we're not using a custom registry, we know the restrictions
// applied to repository names and can warn the user in advance.
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
username := cli.configFile.Configs[auth.IndexServerAddress()].Username
if username == "" {
username = "<user>"
}
return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
}
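// e.g. (illustrative): pushing "ubuntu" is rejected above, while "jdoe/ubuntu" or a
// registry-qualified name such as "registry.example.com/ubuntu" passes through.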
v := url.Values{}
push := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := push(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return push(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdPull(args ...string) error {
cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
if *tag == "" {
*tag = parsedTag
}
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
v := url.Values{}
v.Set("fromImage", remote)
v.Set("tag", *tag)
pull := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := pull(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to pull:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return pull(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdImages(args ...string) error {
cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs")
all := cmd.Bool([]string{"a", "-all"}, false, "show all images (by default filter out the intermediate images used to build)")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "output graph in graphviz format")
flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "output graph in tree format")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 1 {
cmd.Usage()
return nil
}
filter := cmd.Arg(0)
if *flViz || *flTree {
body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
var (
printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)
startImage *engine.Env
roots = engine.NewTable("Created", outs.Len())
byParent = make(map[string]*engine.Table)
)
for _, image := range outs.Data {
if image.Get("ParentId") == "" {
roots.Add(image)
} else {
if children, exists := byParent[image.Get("ParentId")]; exists {
children.Add(image)
} else {
byParent[image.Get("ParentId")] = engine.NewTable("Created", 1)
byParent[image.Get("ParentId")].Add(image)
}
}
if filter != "" {
if filter == image.Get("ID") || filter == utils.TruncateID(image.Get("ID")) {
startImage = image
}
for _, repotag := range image.GetList("RepoTags") {
if repotag == filter {
startImage = image
}
}
}
}
if *flViz {
fmt.Fprintf(cli.out, "digraph docker {\n")
printNode = (*DockerCli).printVizNode
} else {
printNode = (*DockerCli).printTreeNode
}
if startImage != nil {
root := engine.NewTable("Created", 1)
root.Add(startImage)
cli.WalkTree(*noTrunc, root, byParent, "", printNode)
} else if filter == "" {
cli.WalkTree(*noTrunc, roots, byParent, "", printNode)
}
if *flViz {
fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
}
} else {
v := url.Values{}
if cmd.NArg() == 1 {
v.Set("filter", filter)
}
if *all {
v.Set("all", "1")
}
body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
for _, out := range outs.Data {
for _, repotag := range out.GetList("RepoTags") {
repo, tag := utils.ParseRepositoryTag(repotag)
outID := out.Get("ID")
if !*noTrunc {
outID = utils.TruncateID(outID)
}
if !*quiet {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize")))
} else {
fmt.Fprintln(w, outID)
}
}
}
if !*quiet {
w.Flush()
}
}
return nil
}
func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
length := images.Len()
if length > 1 {
for index, image := range images.Data {
if index+1 == length {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
} else {
printNode(cli, noTrunc, image, prefix+"\u251C─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode)
}
}
}
} else {
for _, image := range images.Data {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
}
}
}
func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
var (
imageID string
parentID string
)
if noTrunc {
imageID = image.Get("ID")
parentID = image.Get("ParentId")
} else {
imageID = utils.TruncateID(image.Get("ID"))
parentID = utils.TruncateID(image.Get("ParentId"))
}
if parentID == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
} else {
fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
}
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n"))
}
}
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
var imageID string
if noTrunc {
imageID = image.Get("ID")
} else {
imageID = utils.TruncateID(image.Get("ID"))
}
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize")))
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
} else {
fmt.Fprint(cli.out, "\n")
}
}
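// displayablePorts renders the port column for `docker ps`; a published port shows up as,
// e.g. (illustrative), "0.0.0.0:49153->6379/tcp".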
func displayablePorts(ports *engine.Table) string {
result := []string{}
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
}
}
sort.Strings(result)
return strings.Join(result, ", ")
}
func (cli *DockerCli) CmdPs(args ...string) error {
cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes")
all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.")
before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only containers created before Id, include non-running ones.")
last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
if err := cmd.Parse(args); err != nil {
return nil
}
v := url.Values{}
if *last == -1 && *nLatest {
*last = 1
}
if *all {
v.Set("all", "1")
}
if *last != -1 {
v.Set("limit", strconv.Itoa(*last))
}
if *since != "" {
v.Set("since", *since)
}
if *before != "" {
v.Set("before", *before)
}
if *size {
v.Set("size", "1")
}
body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
if *size {
fmt.Fprintln(w, "\tSIZE")
} else {
fmt.Fprint(w, "\n")
}
}
for _, out := range outs.Data {
var (
outID = out.Get("ID")
outNames = out.GetList("Names")
)
if !*noTrunc {
outID = utils.TruncateID(outID)
}
// Remove the leading / from the names
for i := 0; i < len(outNames); i++ {
outNames[i] = outNames[i][1:]
}
if !*quiet {
var (
outCommand = out.Get("Command")
ports = engine.NewTable("", 0)
)
if !*noTrunc {
outCommand = utils.Trunc(outCommand, 20)
}
ports.ReadListFrom([]byte(out.Get("Ports")))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ","))
if *size {
if out.GetInt("SizeRootFs") > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs")))
} else {
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw")))
}
} else {
fmt.Fprint(w, "\n")
}
} else {
fmt.Fprintln(w, outID)
}
}
if !*quiet {
w.Flush()
}
return nil
}
func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g. \"John Hannibal Smith <[email protected]>\")")
flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
if err := cmd.Parse(args); err != nil {
return nil
}
var name, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
name = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
if name == "" {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("container", name)
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("comment", *flComment)
v.Set("author", *flAuthor)
var config *Config
if *flConfig != "" {
config = &Config{}
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
return err
}
}
body, _, err := readBody(cli.call("POST", "/commit?"+v.Encode(), config, false))
if err != nil {
return err
}
apiID := &APIID{}
err = json.Unmarshal(body, apiID)
if err != nil {
return err
}
fmt.Fprintf(cli.out, "%s\n", apiID.ID)
return nil
}
func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server")
since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
v := url.Values{}
if *since != "" {
loc := time.FixedZone(time.Now().Zone())
format := "2006-01-02 15:04:05 -0700 MST"
if len(*since) < len(format) {
format = format[:len(*since)]
}
if t, err := time.ParseInLocation(format, *since, loc); err == nil {
v.Set("since", strconv.FormatInt(t.Unix(), 10))
} else {
v.Set("since", *since)
}
}
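// e.g. (illustrative): --since="2014-01-02 15:04:05" is parsed against the layout above,
// while a raw value such as --since="1388678645" is passed through as a Unix timestamp.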
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExport(args ...string) error {
cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdDiff(args ...string) error {
cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
for _, change := range outs.Data {
var kind string
switch change.GetInt("Kind") {
case archive.ChangeModify:
kind = "C"
case archive.ChangeAdd:
kind = "A"
case archive.ChangeDelete:
kind = "D"
}
fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path"))
}
return nil
}
func (cli *DockerCli) CmdLogs(args ...string) error {
cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
v := url.Values{}
v.Set("logs", "1")
v.Set("stdout", "1")
v.Set("stderr", "1")
if *follow && container.State.Running {
v.Set("stream", "1")
}
if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdAttach(args ...string) error {
cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode)")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
if !container.State.IsRunning() {
return fmt.Errorf("Impossible to attach to a stopped container, start it first")
}
if container.Config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Debugf("Error monitoring TTY size: %s", err)
}
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if !*noStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *proxy && !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
return err
}
_, status, err := getExitCode(cli, cmd.Arg(0))
if err != nil {
return err
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdSearch(args ...string) error {
cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds")
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only display results with at least xxx stars")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("term", cmd.Arg(0))
body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("star_count", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n")
for _, out := range outs.Data {
if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) {
continue
}
desc := strings.Replace(out.Get("description"), "\n", " ", -1)
desc = strings.Replace(desc, "\r", " ", -1)
if !*noTrunc && len(desc) > 45 {
desc = utils.Trunc(desc, 42) + "..."
}
fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count"))
if out.GetBool("is_official") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\t")
if out.GetBool("is_trusted") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
// Ports type - Used to parse multiple -p flags
type ports []int
func (cli *DockerCli) CmdTag(args ...string) error {
cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 && cmd.NArg() != 3 {
cmd.Usage()
return nil
}
var repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
repository, tag = cmd.Arg(1), cmd.Arg(2)
} else {
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
if *force {
v.Set("force", "1")
}
if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil {
return err
}
return nil
}
//FIXME Only used in tests
func ParseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := flag.NewFlagSet("run", flag.ContinueOnError)
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
return parseRun(cmd, args, sysInfo)
}
func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
var (
// FIXME: use utils.ListOpts for attach and volumes?
flAttach = NewListOpts(ValidateAttach)
flVolumes = NewListOpts(ValidatePath)
flLinks = NewListOpts(ValidateLink)
flEnv = NewListOpts(ValidateEnv)
flPublish ListOpts
flExpose ListOpts
flDns ListOpts
flVolumesFrom ListOpts
flLxcOpts ListOpts
flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id")
flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container")
flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces")
flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached")
flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty")
flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image")
flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID")
flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
// For documentation purpose
_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
_ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
)
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.")
cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat))
cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
// Check if the kernel supports memory limit cgroup.
if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit {
*flMemoryString = ""
}
// Validate input params
if *flDetach && flAttach.Len() > 0 {
return nil, nil, cmd, ErrConflictAttachDetach
}
if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
return nil, nil, cmd, ErrInvalidWorikingDirectory
}
if *flDetach && *flAutoRemove {
return nil, nil, cmd, ErrConflictDetachAutoRemove
}
// If neither -d nor -a is set, attach to everything by default
if flAttach.Len() == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
var flMemory int64
if *flMemoryString != "" {
parsedMemory, err := utils.RAMInBytes(*flMemoryString)
if err != nil {
return nil, nil, cmd, err
}
flMemory = parsedMemory
}
var binds []string
// add any bind targets to the list of container volumes
for bind := range flVolumes.GetMap() {
if arr := strings.Split(bind, ":"); len(arr) > 1 {
if arr[0] == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
}
dstDir := arr[1]
flVolumes.Set(dstDir)
binds = append(binds, bind)
flVolumes.Delete(bind)
} else if bind == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'")
}
}
var (
parsedArgs = cmd.Args()
runCmd []string
entrypoint []string
image string
)
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
if *flEntrypoint != "" {
entrypoint = []string{*flEntrypoint}
}
lxcConf, err := parseLxcConfOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
var (
domainname string
hostname = *flHostname
parts = strings.SplitN(hostname, ".", 2)
)
if len(parts) > 1 {
hostname = parts[0]
domainname = parts[1]
}
ports, portBindings, err := parsePortSpecs(flPublish.GetAll())
if err != nil {
return nil, nil, cmd, err
}
// Merge in exposed ports to the map of published ports
for _, e := range flExpose.GetAll() {
if strings.Contains(e, ":") {
return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e)
}
p := NewPort(splitProtoPort(e))
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
}
config := &Config{
Hostname: hostname,
Domainname: domainname,
PortSpecs: nil, // Deprecated
ExposedPorts: ports,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv.GetAll(),
Cmd: runCmd,
Dns: flDns.GetAll(),
Image: image,
Volumes: flVolumes.GetMap(),
VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
LxcConf: lxcConf,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks.GetAll(),
PublishAllPorts: *flPublishAll,
}
if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, hostConfig, cmd, nil
}
func (cli *DockerCli) CmdRun(args ...string) error {
config, hostConfig, cmd, err := parseRun(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil)
if err != nil {
return err
}
if config.Image == "" {
cmd.Usage()
return nil
}
// Retrieve relevant client-side config
var (
flName = cmd.Lookup("name")
flRm = cmd.Lookup("rm")
flSigProxy = cmd.Lookup("sig-proxy")
autoRemove, _ = strconv.ParseBool(flRm.Value.String())
sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String())
)
// Disable sigProxy when a TTY is allocated
if config.Tty {
sigProxy = false
}
var containerIDFile io.WriteCloser
if len(hostConfig.ContainerIDFile) > 0 {
if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil {
return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile)
}
if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil {
return fmt.Errorf("failed to create the container ID file: %s", err)
}
defer containerIDFile.Close()
}
containerValues := url.Values{}
if name := flName.Value.String(); name != "" {
containerValues.Set("name", name)
}
//create the container
body, statusCode, err := readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false))
//if image not found try to pull it
if statusCode == 404 {
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = DEFAULTTAG
}
fmt.Fprintf(cli.err, "Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
v := url.Values{}
repos, tag := utils.ParseRepositoryTag(config.Image)
v.Set("fromImage", repos)
v.Set("tag", tag)
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(repos)
if err != nil {
return err
}
// Load the auth config file, to be able to pull the image
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
return err
}
if body, _, err = readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false)); err != nil {
return err
}
} else if err != nil {
return err
}
var runResult APIRun
if err := json.Unmarshal(body, &runResult); err != nil {
return err
}
for _, warning := range runResult.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if len(hostConfig.ContainerIDFile) > 0 {
if _, err = containerIDFile.Write([]byte(runResult.ID)); err != nil {
return fmt.Errorf("failed to write the container ID to the file: %s", err)
}
}
if sigProxy {
sigc := cli.forwardAllSignals(runResult.ID)
defer utils.StopCatch(sigc)
}
var (
waitDisplayId chan struct{}
errCh chan error
)
if !config.AttachStdout && !config.AttachStderr {
// Make this asynchronous in order to let the client write to stdin before having to read the ID
waitDisplayId = make(chan struct{})
go func() {
defer close(waitDisplayId)
fmt.Fprintf(cli.out, "%s\n", runResult.ID)
}()
}
// We need to instantiate the chan because the select needs it. It can
// be closed but can't be uninitialized.
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
utils.Errorf("Hijack did not finish (chan still open)")
}
}()
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
var (
out, stderr io.Writer
in io.ReadCloser
v = url.Values{}
)
v.Set("stream", "1")
if config.AttachStdin {
v.Set("stdin", "1")
in = cli.in
}
if config.AttachStdout {
v.Set("stdout", "1")
out = cli.out
}
if config.AttachStderr {
v.Set("stderr", "1")
if config.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked)
})
} else {
close(hijacked)
}
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that hijack gets closed when returning (results
// in closing the hijack chan and freeing the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
//start the container
if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig, false)); err != nil {
return err
}
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(runResult.ID); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
if errCh != nil {
if err := <-errCh; err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
// Detached mode: wait for the id to be displayed and return.
if !config.AttachStdout && !config.AttachStderr {
// Detached mode
<-waitDisplayId
return nil
}
var status int
// Attached mode
if autoRemove {
// Autoremove: wait for the container to finish, retrieve
// the exit code and remove the container
if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.ID+"/wait", nil, false)); err != nil {
return err
}
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.ID+"?v=1", nil, false)); err != nil {
return err
}
} else {
if !config.Tty {
// In non-TTY mode, we can't detach, so we know we need to wait.
if status, err = waitForExit(cli, runResult.ID); err != nil {
return err
}
} else {
// In TTY mode, there is a race. If the process dies too slowly, the state can be updated after the getExitCode call
// and result in a wrong exit code.
// No Autoremove: Simply retrieve the exit code
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
var copyData APICopy
info := strings.Split(cmd.Arg(0), ":")
if len(info) != 2 {
return fmt.Errorf("Error: Path not specified")
}
copyData.Resource = info[1]
copyData.HostPath = cmd.Arg(1)
stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
if stream != nil {
defer stream.Close()
}
if err != nil {
return err
}
if statusCode == 200 {
if err := archive.Untar(stream, copyData.HostPath, nil); err != nil {
return err
}
}
return nil
}
func (cli *DockerCli) CmdSave(args ...string) error {
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
image := cmd.Arg(0)
if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
var params io.Reader
if data != nil {
buf, err := json.Marshal(data)
if err != nil {
return nil, -1, err
}
params = bytes.NewBuffer(buf)
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params)
if err != nil {
return nil, -1, err
}
if passAuthInfo {
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress())
getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return nil, err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
}
if headers, err := getHeaders(authConfig); err == nil && headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
if err != nil {
clientconn.Close()
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return clientconn.Close()
})
return wrapper, resp.StatusCode, nil
}
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
if (method == "POST" || method == "PUT") && in == nil {
in = bytes.NewReader([]byte{})
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
defer clientconn.Close()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) == 0 {
return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
}
return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
}
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
return nil
}
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
defer func() {
if started != nil {
close(started)
}
}()
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = utils.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminal {
term.RestoreTerminal(cli.terminalFd, oldState)
}
in.Close()
}
}()
// When TTY is ON, use regular copy
if setRawTerminal {
_, err = io.Copy(stdout, br)
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Errorf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Errorf("Error sendStdin: %s", err)
return err
}
}
return nil
}
func (cli *DockerCli) getTtySize() (int, int) {
if !cli.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
utils.Errorf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
func (cli *DockerCli) resizeTty(id string) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
}
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
utils.Errorf("Error resize: %s", err)
}
}
func (cli *DockerCli) monitorTtySize(id string) error {
cli.resizeTty(id)
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for range sigchan {
cli.resizeTty(id)
}
}()
return nil
}
func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
os.Exit(2)
}
return flags
}
func (cli *DockerCli) LoadConfigFile() (err error) {
cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
if err != nil {
fmt.Fprintf(cli.err, "WARNING: %s\n", err)
}
return err
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
body, _, err := readBody(cli.call("POST", "/containers/"+containerId+"/wait", nil, false))
if err != nil {
return -1, err
}
var out APIWait
if err := json.Unmarshal(body, &out); err != nil {
return -1, err
}
return out.StatusCode, nil
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return false, -1, err
}
return false, -1, nil
}
c := &Container{}
if err := json.Unmarshal(body, c); err != nil {
return false, -1, err
}
return c.State.IsRunning(), c.State.GetExitCode(), nil
}
func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
if stream != nil {
defer stream.Close()
}
if err != nil {
return nil, statusCode, err
}
body, err := ioutil.ReadAll(stream)
if err != nil {
return nil, -1, err
}
if statusCode < 200 || statusCode >= 400 {
if len(body) == 0 {
return nil, statusCode, fmt.Errorf("Error: %s", http.StatusText(statusCode))
}
return nil, statusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
return body, statusCode, nil
}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
var (
isTerminal = false
terminalFd uintptr
)
if in != nil {
if file, ok := in.(*os.File); ok {
terminalFd = file.Fd()
isTerminal = term.IsTerminal(terminalFd)
}
}
if err == nil {
err = out
}
return &DockerCli{
proto: proto,
addr: addr,
in: in,
out: out,
err: err,
isTerminal: isTerminal,
terminalFd: terminalFd,
}
}
type DockerCli struct {
proto string
addr string
configFile *auth.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer
isTerminal bool
terminalFd uintptr
}
|
[
"\"HOME\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"NORAW\"",
"\"HOME\""
] |
[] |
[
"HOME",
"NORAW",
"DEBUG"
] |
[]
|
["HOME", "NORAW", "DEBUG"]
|
go
| 3 | 0 | |
src/third_party/skia/infra/bots/task_drivers/perf_puppeteer_canvas/perf_puppeteer_canvas.go
|
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This executable is meant to be a general way to gather perf data using puppeteer. The logic
// (e.g. what bench to run, how to process that particular output) is selected using the ExtraConfig
// part of the task name.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
"sort"
"go.skia.org/infra/go/exec"
"go.skia.org/infra/go/skerr"
"go.skia.org/infra/task_driver/go/lib/os_steps"
"go.skia.org/infra/task_driver/go/td"
)
const perfKeyWebGLVersion = "webgl_version"
func main() {
var (
// Required properties for this task.
projectID = flag.String("project_id", "", "ID of the Google Cloud project.")
taskName = flag.String("task_name", "", "Name of the task.")
benchmarkPath = flag.String("benchmark_path", "", "Path to location of the benchmark files (e.g. //tools/perf-puppeteer).")
outputPath = flag.String("output_path", "", "Perf Output will be produced here")
gitHash = flag.String("git_hash", "", "Git hash this data corresponds to")
taskID = flag.String("task_id", "", "task id this data was generated on")
nodeBinPath = flag.String("node_bin_path", "", "Path to the node bin directory (should have npm also). This directory *must* be on the PATH when this executable is called, otherwise, the wrong node or npm version may be found (e.g. the one on the system), even if we are explicitly calling npm with the absolute path.")
// These flags feed into the perf trace keys associated with the output data.
osTrace = flag.String("os_trace", "", "OS this is running on.")
modelTrace = flag.String("model_trace", "", "Description of host machine.")
cpuOrGPUTrace = flag.String("cpu_or_gpu_trace", "", "If this is a CPU or GPU configuration.")
cpuOrGPUValueTrace = flag.String("cpu_or_gpu_value_trace", "", "The hardware of this CPU/GPU")
webGLVersion = flag.String("webgl_version", "", "Major WebGl version to use when creating gl drawing context. 1 or 2")
// Flags that may be required for certain configs
canvaskitBinPath = flag.String("canvaskit_bin_path", "", "The location of a canvaskit.js and canvaskit.wasm")
// Debugging flags.
local = flag.Bool("local", false, "True if running locally (as opposed to on the bots)")
outputSteps = flag.String("o", "", "If provided, dump a JSON blob of step data to the given file. Prints to stdout if '-' is given.")
)
// Setup.
ctx := td.StartRun(projectID, taskID, taskName, outputSteps, local)
defer td.EndRun(ctx)
keys := map[string]string{
"os": *osTrace,
"model": *modelTrace,
perfKeyCpuOrGPU: *cpuOrGPUTrace,
"cpu_or_gpu_value": *cpuOrGPUValueTrace,
perfKeyWebGLVersion: *webGLVersion,
}
outputWithoutResults, err := makePerfObj(*gitHash, *taskID, os.Getenv("SWARMING_BOT_ID"), keys)
if err != nil {
td.Fatal(ctx, skerr.Wrap(err))
}
// Absolute paths work more consistently than relative paths.
nodeBinAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *nodeBinPath, "node_bin_path")
benchmarkAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *benchmarkPath, "benchmark_path")
canvaskitBinAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *canvaskitBinPath, "canvaskit_bin_path")
outputAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *outputPath, "output_path")
if err := setup(ctx, benchmarkAbsPath, nodeBinAbsPath); err != nil {
td.Fatal(ctx, skerr.Wrap(err))
}
if err := benchCanvas(ctx, outputWithoutResults, benchmarkAbsPath, canvaskitBinAbsPath, nodeBinAbsPath); err != nil {
td.Fatal(ctx, skerr.Wrap(err))
}
// The outputFile name should be unique between tasks, to avoid uploading files with
// duplicate names to GCS.
outputFile := filepath.Join(outputAbsPath, fmt.Sprintf("perf-%s.json", *taskID))
if err := processFramesData(ctx, outputWithoutResults, benchmarkAbsPath, outputFile); err != nil {
td.Fatal(ctx, skerr.Wrap(err))
}
}
const perfKeyCpuOrGPU = "cpu_or_gpu"
func makePerfObj(gitHash, taskID, machineID string, keys map[string]string) (perfJSONFormat, error) {
rv := perfJSONFormat{}
if gitHash == "" {
return rv, skerr.Fmt("Must provide --git_hash")
}
if taskID == "" {
return rv, skerr.Fmt("Must provide --task_id")
}
rv.GitHash = gitHash
rv.SwarmingTaskID = taskID
rv.SwarmingMachineID = machineID
rv.Key = keys
rv.Key["arch"] = "wasm"
rv.Key["browser"] = "Chromium"
rv.Key["configuration"] = "Release"
rv.Key["extra_config"] = "CanvasPerf"
rv.Key["binary"] = "CanvasKit"
rv.Results = map[string]map[string]perfResult{}
return rv, nil
}
func setup(ctx context.Context, benchmarkPath, nodeBinPath string) error {
ctx = td.StartStep(ctx, td.Props("setup").Infra())
defer td.EndStep(ctx)
if _, err := exec.RunCwd(ctx, benchmarkPath, filepath.Join(nodeBinPath, "npm"), "ci"); err != nil {
return td.FailStep(ctx, skerr.Wrap(err))
}
if err := os.MkdirAll(filepath.Join(benchmarkPath, "out"), 0777); err != nil {
return td.FailStep(ctx, skerr.Wrap(err))
}
return nil
}
// benchCanvas runs the puppeteer canvas_perf_driver.html test and parses the results.
func benchCanvas(ctx context.Context, perf perfJSONFormat, benchmarkPath, canvaskitBinPath, nodeBinPath string) error {
ctx = td.StartStep(ctx, td.Props("perf canvas tests"))
defer td.EndStep(ctx)
err := td.Do(ctx, td.Props("Benchmark Canvas"), func(ctx context.Context) error {
// See comment in setup about why we specify the absolute path for node.
args := []string{filepath.Join(nodeBinPath, "node"),
"perf-canvaskit-with-puppeteer",
"--bench_html", "canvas_perf.html",
"--canvaskit_js", filepath.Join(canvaskitBinPath, "canvaskit.js"),
"--canvaskit_wasm", filepath.Join(canvaskitBinPath, "canvaskit.wasm"),
"--assets", "canvas_perf_assets", // relative path
"--output", filepath.Join(benchmarkPath, "out", "perf.json"),
"--timeout", "240",
}
if perf.Key[perfKeyCpuOrGPU] != "CPU" {
args = append(args, "--use_gpu")
if perf.Key[perfKeyWebGLVersion] == "1" {
args = append(args, "--query_params webgl1")
}
}
_, err := exec.RunCwd(ctx, benchmarkPath, args...)
if err != nil {
return skerr.Wrap(err)
}
return nil
})
if err != nil {
return td.FailStep(ctx, skerr.Wrap(err))
}
return nil
}
// description of the output file format
type perfJSONFormat struct {
GitHash string `json:"gitHash"`
SwarmingTaskID string `json:"swarming_task_id"`
SwarmingMachineID string `json:"swarming_machine_id"`
Key map[string]string `json:"key"`
// Maps bench name -> "config" -> result key -> value
Results map[string]map[string]perfResult `json:"results"`
}
type perfResult map[string]float32
// description of the input file format.
type oneTestResult struct {
WithoutFlushMS []float32 `json:"without_flush_ms"`
WithFlushMS []float32 `json:"with_flush_ms"`
TotalFrameMS []float32 `json:"total_frame_ms"`
}
// processFramesData looks at the result of benchCanvas, computes summary data on
// those files and adds them as Results into the provided perf object. The perf object is then
// written in JSON format to outputPath.
func processFramesData(ctx context.Context, perf perfJSONFormat, benchmarkPath, outputFilePath string) error {
perfJSONPath := filepath.Join(benchmarkPath, "out", "perf.json")
ctx = td.StartStep(ctx, td.Props("process perf output "+perfJSONPath))
defer td.EndStep(ctx)
err := td.Do(ctx, td.Props("Process "+perfJSONPath), func(ctx context.Context) error {
config := "software"
if perf.Key[perfKeyCpuOrGPU] != "CPU" {
config = "webgl2"
if perf.Key[perfKeyWebGLVersion] == "1" {
config = "webgl1"
}
}
b, err := os_steps.ReadFile(ctx, perfJSONPath)
if err != nil {
return skerr.Wrap(err)
}
var fileData map[string]oneTestResult
if err := json.Unmarshal(b, &fileData); err != nil {
return skerr.Wrap(err)
}
for name, item := range fileData {
metrics, err := calculatePerfFromTest(item) // item is a oneTestResult
if err != nil {
return skerr.Wrap(err)
}
perf.Results[name] = map[string]perfResult{
config: metrics,
}
}
return nil
})
if err != nil {
return td.FailStep(ctx, skerr.Wrap(err))
}
err = td.Do(ctx, td.Props("Writing perf JSON file to "+outputFilePath), func(ctx context.Context) error {
if err := os.MkdirAll(filepath.Dir(outputFilePath), 0777); err != nil {
return skerr.Wrap(err)
}
b, err := json.MarshalIndent(perf, "", " ")
if err != nil {
return skerr.Wrap(err)
}
if err = ioutil.WriteFile(outputFilePath, b, 0666); err != nil {
return skerr.Wrap(err)
}
return nil
})
if err != nil {
return td.FailStep(ctx, skerr.Wrap(err))
}
return nil
}
// Compute averages and quantiles of the frame time results from one test.
func calculatePerfFromTest(metrics oneTestResult) (map[string]float32, error) {
avgWithoutFlushMS, medianWithoutFlushMS, stddevWithoutFlushMS, _, _, _ := summarize(metrics.WithoutFlushMS)
avgWithFlushMS, medianWithFlushMS, stddevWithFlushMS, _, _, _ := summarize(metrics.WithFlushMS)
avgFrame, medFrame, stdFrame, percentile90Frame, percentile95Frame, percentile99Frame := summarize(metrics.TotalFrameMS)
rv := map[string]float32{
"avg_render_without_flush_ms": avgWithoutFlushMS,
"median_render_without_flush_ms": medianWithoutFlushMS,
"stddev_render_without_flush_ms": stddevWithoutFlushMS,
"avg_render_with_flush_ms": avgWithFlushMS,
"median_render_with_flush_ms": medianWithFlushMS,
"stddev_render_with_flush_ms": stddevWithFlushMS,
"avg_render_frame_ms": avgFrame,
"median_render_frame_ms": medFrame,
"stddev_render_frame_ms": stdFrame,
// more detailed statistics on total frame times
"90th_percentile_frame_ms": percentile90Frame,
"95th_percentile_frame_ms": percentile95Frame,
"99th_percentile_frame_ms": percentile99Frame,
}
return rv, nil
}
func summarize(input []float32) (float32, float32, float32, float32, float32, float32) {
// Make a copy of the data so we don't mutate the order of the original
sorted := make([]float32, len(input))
copy(sorted, input)
sort.Slice(sorted, func(i, j int) bool {
return sorted[i] < sorted[j]
})
avg := computeAverage(sorted)
variance := float32(0)
for i := 0; i < len(sorted); i++ {
variance += (sorted[i] - avg) * (sorted[i] - avg)
}
stddev := float32(math.Sqrt(float64(variance / float32(len(sorted)))))
medIdx := (len(sorted) * 50) / 100
percentile90Idx := (len(sorted) * 90) / 100
percentile95Idx := (len(sorted) * 95) / 100
percentile99Idx := (len(sorted) * 99) / 100
return avg, sorted[medIdx], stddev, sorted[percentile90Idx], sorted[percentile95Idx], sorted[percentile99Idx]
}
func computeAverage(d []float32) float32 {
avg := float32(0)
for i := 0; i < len(d); i++ {
avg += d[i]
}
avg /= float32(len(d))
return avg
}
|
[
"\"SWARMING_BOT_ID\""
] |
[] |
[
"SWARMING_BOT_ID"
] |
[]
|
["SWARMING_BOT_ID"]
|
go
| 1 | 0 | |
sdk/eventhub/azure-eventhub/samples/sync_samples/client_identity_authentication.py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An example to show authentication using credentials defined by azure.identity library.
EnvironmentCredential is capable of authenticating as a service principal using a client secret or a certificate, or as
a user with a username and password. Configuration is attempted in this order, using these environment variables:
Service principal with secret:
- **AZURE_TENANT_ID**: ID of the service principal's tenant. Also called its 'directory' ID.
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_SECRET**: one of the service principal's client secrets
Service principal with certificate:
- **AZURE_TENANT_ID**: ID of the service principal's tenant. Also called its 'directory' ID.
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_CERTIFICATE_PATH**: path to a PEM-encoded certificate file including the private key. The
certificate must not be password-protected.
User with username and password:
- **AZURE_CLIENT_ID**: the application's client ID
- **AZURE_USERNAME**: a username (usually an email address)
- **AZURE_PASSWORD**: that user's password
- **AZURE_TENANT_ID**: (optional) ID of the service principal's tenant. Also called its 'directory' ID.
If not provided, defaults to the 'organizations' tenant, which supports only Azure Active Directory work or
school accounts.
Please refer to azure.identity library for detailed information.
This sample also shows the process of utilizing a different credential object, in this case, DefaultAzureCredential,
both to demonstrate the ease of adjusting authentication, and to surface another method for doing so.
"""
import os
from azure.eventhub import EventData, EventHubProducerClient
from azure.identity import EnvironmentCredential
fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
eventhub_name = os.environ['EVENT_HUB_NAME']
credential = EnvironmentCredential()
# Note: There are other options for specifying the credential, for instance, DefaultAzureCredential.
# DefaultAzureCredential attempts a chained set of authentication methods, per the documentation here: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity
# For example, the user to be logged in can be specified via the environment variable AZURE_USERNAME.
# Alternatively, one can specify AZURE_TENANT_ID, AZURE_CLIENT_ID, and AZURE_CLIENT_SECRET to use the EnvironmentCredential class.
# The docs above describe all mechanisms which DefaultAzureCredential internally supports.
#
# credential = DefaultAzureCredential()
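# As an additional illustrative sketch (not part of the original sample), the "service
# principal with secret" configuration described in the docstring can also be expressed
# explicitly with ClientSecretCredential from azure.identity, instead of reading the
# values from environment variables. The placeholder values below are hypothetical:
#
# from azure.identity import ClientSecretCredential
# credential = ClientSecretCredential(
#     tenant_id="<AZURE_TENANT_ID>",
#     client_id="<AZURE_CLIENT_ID>",
#     client_secret="<AZURE_CLIENT_SECRET>",
# )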
producer = EventHubProducerClient(fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential)
with producer:
event_data_batch = producer.create_batch()
while True:
try:
event_data_batch.add(EventData('Message inside EventBatchData'))
except ValueError:
# EventDataBatch object reaches max_size.
# New EventDataBatch object can be created here to send more data.
break
producer.send_batch(event_data_batch)
print('Finished sending.')
|
[] |
[] |
[
"EVENT_HUB_HOSTNAME",
"EVENT_HUB_NAME"
] |
[]
|
["EVENT_HUB_HOSTNAME", "EVENT_HUB_NAME"]
|
python
| 2 | 0 | |
aad-machine-to-machine/server.py
|
import json
import os
from blacksheep.server.application import Application
from blacksheep.server.authentication.jwt import JWTBearerAuthentication
from blacksheep.server.responses import html
from dotenv import load_dotenv
from guardpost.authentication import Identity
from guardpost.authorization import Policy
from guardpost.common import AuthenticatedRequirement
# read .env file into environment variables
load_dotenv()
app = Application()
aad_authority = os.environ["API_ISSUER"]
api_audience = os.environ["API_AUDIENCE"]
# configure the application to support authentication using JWT access tokens obtained
# from "Authorization: Bearer {...}" request headers;
# access tokens are validated using OpenID Connect configuration from the configured
# authority
app.use_authentication().add(
JWTBearerAuthentication(
authority=aad_authority,
valid_audiences=[api_audience],
)
)
# configure authorization with a default policy that requires an authenticated user for
# all endpoints, except when request handlers are explicitly decorated by
# @allow_anonymous
app.use_authorization().with_default_policy(
Policy("authenticated", AuthenticatedRequirement())
)
get = app.router.get
@get("/")
def home(user: Identity):
assert user.is_authenticated()
return html(
f"""
<!DOCTYPE html>
<html>
<head>
<style>
pre {{
border: 1px dotted darkred;
padding: 1rem;
}}
</style>
</head>
<body>
<h1>Welcome! These are your claims:</h1>
<pre>{json.dumps(user.claims, ensure_ascii=False, indent=4)}</pre>
</body>
</html>
"""
)
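# Illustrative sketch only (not part of the original sample): the default policy above
# requires an authenticated user for every endpoint, and a handler can opt out with the
# allow_anonymous decorator mentioned earlier. This assumes allow_anonymous is importable
# from blacksheep.server.authorization in the installed BlackSheep version.
#
# from blacksheep.server.authorization import allow_anonymous
#
# @allow_anonymous()
# @get("/public")
# def public_home():
#     return html("<h1>No authentication required here</h1>")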
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=5000, log_level="debug")
|
[] |
[] |
[
"API_AUDIENCE",
"API_ISSUER"
] |
[]
|
["API_AUDIENCE", "API_ISSUER"]
|
python
| 2 | 0 | |
backend/backend/settings.py
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv(override=True) # loads the configs from .env
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = str(os.getenv("SECRET_KEY"))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.getenv("DEBUG", 0)))
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.postgres",
"rest_framework",
"api",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination", # noqa: E501
"PAGE_SIZE": 20,
}
ROOT_URLCONF = "backend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "backend.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# "default": {
# "ENGINE": "django.db.backends.sqlite3",
# "NAME": BASE_DIR / "db.sqlite3",
# }
# }
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": str(os.getenv("UPRN_DB_NAME")),
"USER": str(os.getenv("UPRN_DB_USER")),
"PASSWORD": str(os.getenv("UPRN_DB_PASSWORD")),
"HOST": str(os.getenv("UPRN_DB_HOST")),
"PORT": str(os.getenv("UPRN_DB_PORT")),
}
}
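# For reference, a minimal .env sketch covering the variables read by these settings
# (the values below are hypothetical placeholders, not part of the original project):
#
#   SECRET_KEY=change-me
#   DEBUG=1
#   UPRN_DB_NAME=uprn
#   UPRN_DB_USER=uprn_user
#   UPRN_DB_PASSWORD=change-me
#   UPRN_DB_HOST=localhost
#   UPRN_DB_PORT=5432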
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", # noqa: E501
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", # noqa: E501
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", # noqa: E501
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-gb"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = "static"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
|
[] |
[] |
[
"UPRN_DB_USER",
"UPRN_DB_HOST",
"SECRET_KEY",
"UPRN_DB_NAME",
"UPRN_DB_PORT",
"DEBUG",
"UPRN_DB_PASSWORD"
] |
[]
|
["UPRN_DB_USER", "UPRN_DB_HOST", "SECRET_KEY", "UPRN_DB_NAME", "UPRN_DB_PORT", "DEBUG", "UPRN_DB_PASSWORD"]
|
python
| 7 | 0 | |
pv/wsgi.py
|
"""
WSGI config for pv project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
sys.path.append('/srv/pv/pv')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pv.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
plugins/modules/oracle_db.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: oracle_db
short_description: Manage an Oracle database
description:
- Create/delete a database using dbca
- If a responsefile is available, that will be used. If initparams is defined, those will be attached to the createDatabase command
- If no responsefile is provided, the database will be created based on all other parameters
version_added: "2.4.0.0"
options:
oracle_home:
description:
- The home where the database will be created
required: False
aliases: ['oh']
db_name:
description:
- The name of the database
required: True
default: None
aliases: ['db','database_name','name']
sid:
description:
- The instance name
required: False
default: None
db_unique_name:
description:
- The database db_unique_name
required: False
default: None
aliases: ['dbunqn','unique_name']
sys_password:
description:
- Password for the sys user
required: False
default: None
aliases: ['syspw','sysdbapassword','sysdbapw']
system_password:
description:
- Password for the system user
- If not set, defaults to sys_password
required: False
default: None
aliases: ['systempw']
dbsnmp_password:
description:
- Password for the dbsnmp user
- If not set, defaults to sys_password
required: False
default: None
aliases: ['dbsnmppw']
responsefile:
description:
- The name of responsefile
required: True
default: None
template:
description:
- The template the database will be based off
required: False
default: General_Purpose.dbc
cdb:
description:
- Should the database be a container database
required: False
default: False
aliases: ['container']
choices: ['True','False']
datafile_dest:
description:
- Where the database files should be placed (ASM diskgroup or filesystem path)
required: False
default: False
aliases: ['dfd']
recoveryfile_dest:
description:
- Where the database files should be placed (ASM diskgroup or filesystem path)
required: False
default: False
aliases: ['rfd']
storage_type:
description:
- Type of underlying storage (Filesystem or ASM)
required: False
default: FS
aliases: ['storage']
choices: ['FS','ASM']
dbconfig_type:
description:
- Type of database (SI,RAC,RON)
required: False
default: SI
choices: ['SI','RAC','RACONENODE']
db_type:
description:
- Default Type of database (MULTIPURPOSE, OLTP, DATA_WAREHOUSING)
required: False
default: MULTIPURPOSE
choices: ['MULTIPURPOSE','OLTP','DATA_WAREHOUSING']
racone_service:
description:
- If dbconfig_type = RACONENODE, a service has to be created along with the DB. This is the name of that service
- If no name is defined, the service will be called "{{ db_name }}_ronserv"
required: False
default: None
aliases: ['ron_service']
characterset:
description:
- The database characterset
required: False
default: AL32UTF8
memory_percentage:
description:
- The database total memory in % of available memory
required: False
memory_totalmb:
description:
- The database total memory in MB. Defaults to 1G
required: False
default: ['1024']
nodelist:
description:
- The list of nodes a RAC DB should be created on
required: False
amm:
description:
- Should Automatic Memory Management be used (memory_target, memory_max_target)
required: False
default: False
choices: ['True','False']
initparams:
description:
- List of key=value pairs
- e.g
init_params:
- sga_target=1G
- sga_max_size=1G
required: False
customscripts:
description:
- List of scripts to run after database is created
- e.g
customScripts:
- /tmp/xxx.sql
- /tmp/yyy.sql
required: False
default_tablespace_type:
description:
- Database default tablespace type (DEFAULT_TBS_TYPE)
default: smallfile
choices: ['smallfile','bigfile']
default_tablespace:
description:
- Database default tablespace
default: smallfile
required: False
default_temp_tablespace:
description:
- Database default temporary tablespace
required: False
archivelog:
description:
- Puts the database is archivelog mode
required: False
default: false
choices: ['True','False']
type: bool
force_logging:
description:
- Enables force logging for the Database
required: False
default: false
choices: ['True','False']
type: bool
supplemental_logging:
description:
- Enables supplemental (minimal) logging for the Database (basically 'add supplemental log data')
required: False
default: false
choices: ['True','False']
type: bool
flashback:
description:
- Enables flashback for the database
required: False
default: false
choices: ['True','False']
type: bool
state:
description:
- The intended state of the database
default: present
choices: ['present','absent']
hostname:
description:
- The host of the database if using dbms_service
required: false
default: localhost
aliases: ['host']
port:
description:
- The listener port to connect to the database if using dbms_service
required: false
default: 1521
notes:
- cx_Oracle needs to be installed
requirements: [ "cx_Oracle" ]
author: Mikael Sandström, [email protected], @oravirt
'''
EXAMPLES = '''
# Create a DB (non-cdb)
oracle_db:
oh=/u01/app/oracle/12.2.0.1/db1
db_name=orclcdb
syspw=Oracle_123
state=present
storage=ASM
dfd=+DATA
rfd=+DATA
default_tablespace_type: bigfile
- hosts: all
gather_facts: true
vars:
oracle_home: /u01/app/oracle/12.2.0.1/db1
dbname: orclcdb
dbunqname: "{{ dbname}}_unq"
container: True
dbsid: "{{ dbname }}"
hostname: "{{ ansible_hostname }}"
oracle_env:
ORACLE_HOME: "{{ oracle_home }}"
LD_LIBRARY_PATH: "{{ oracle_home }}/lib"
myaction: present
rspfile: "/tmp/dbca_{{dbname}}.rsp"
initparameters:
- memory_target=0
- memory_max_target=0
- sga_target=1500M
- sga_max_size=1500M
dfd: +DATA
rfd: +FRA
storage: ASM
dbtype: SI
#ron_service: my_ron_service
#clnodes: racnode-dc1-1,racnode-dc1-2
tasks:
- name: Manage database
oracle_db:
service_name={{ dbname }}
hostname={{ hostname}}
user=sys
password=Oracle_123
state={{ myaction }}
db_name={{ dbname }}
sid={{ dbsid |default(omit)}}
db_unique_name={{ dbunqname |default(omit) }}
sys_password=Oracle_123
system_password=Oracle_123
responsefile={{ rspfile |default(omit) }}
cdb={{ container |default (omit)}}
initparams={{ initparameters |default(omit)}}
datafile_dest={{ dfd }}
recoveryfile_dest={{rfd}}
storage_type={{storage}}
dbconfig_type={{dbtype}}
racone_service={{ ron_service|default(omit)}}
amm=False
memory_totalmb=1024
nodelist={{ clnodes |default(omit) }}
environment: "{{ oracle_env }}"
run_once: True
'''
import os, re, time
try:
import cx_Oracle
except ImportError:
cx_oracle_exists = False
else:
cx_oracle_exists = True
def get_version(module, msg, oracle_home):
command = '%s/bin/sqlplus -V' % (oracle_home)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
module.fail_json(msg=msg, changed=False)
else:
return stdout.split(' ')[2][0:4]
# Check if the database exists
def check_db_exists(module, msg, oracle_home, db_name, sid, db_unique_name ):
if sid is None:
sid = ''
if gimanaged:
if db_unique_name != None:
checkdb = db_unique_name
else:
checkdb = db_name
command = "%s/bin/srvctl config database -d %s " % (oracle_home, checkdb)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
if 'PRCD-1229' in stdout: #<-- DB is created, but with a different ORACLE_HOME
msg='Database %s already exists in a different home. Stdout -> %s' % (db_name, stdout)
module.fail_json(msg=msg, changed=False)
elif '%s' % (db_name) in stdout: #<-- db doesn't exist
return False
else:
msg = 'Error: command is %s. stdout is %s' % (command, stdout)
return False
elif 'Database name: %s' % (db_name) in stdout: #<-- Database already exists
return True
else:
msg = '%s' % (stdout)
return True
else:
existingdbs = []
oratabfile = '/etc/oratab'
if os.path.exists(oratabfile):
with open(oratabfile) as oratab:
for line in oratab:
if line.startswith('#') or line.startswith(' '):
continue
elif re.search(db_name +':', line) or re.search(sid +':', line):
existingdbs.append(line)
if not existingdbs: #<-- db doesn't exist
return False
else:
for dbs in existingdbs:
if sid != '':
if '%s:' % db_name in dbs or '%s:' % sid in dbs:
if dbs.split(':')[1] != oracle_home.rstrip('/'): #<-- DB is created, but with a different ORACLE_HOME
msg = 'Database %s already exists in a different ORACLE_HOME (%s)' % (db_name, dbs.split(':')[1])
module.fail_json(msg=msg, changed=False)
elif dbs.split(':')[1] == oracle_home.rstrip('/'): #<-- Database already exists
return True
else:
if '%s:' % db_name in dbs:
if dbs.split(':')[1]!= oracle_home.rstrip('/'): #<-- DB is created, but with a different ORACLE_HOME
msg = 'Database %s already exists in a different ORACLE_HOME (%s)' % (db_name, dbs.split(':')[1])
module.fail_json(msg=msg, changed=False)
elif dbs.split(':')[1] == oracle_home.rstrip('/'): #<-- Database already exists
return True
def create_db (module, msg, oracle_home, sys_password, system_password, dbsnmp_password, db_name, sid, db_unique_name, responsefile, template, cdb,
local_undo, datafile_dest, recoveryfile_dest, storage_type, dbconfig_type, racone_service, characterset, memory_percentage, memory_totalmb,
nodelist, db_type, amm, initparams, customscripts, datapatch, domain):
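# Builds a 'dbca -createDatabase -silent ...' command line from the supplied parameters
# (responsefile, passwords, storage, memory and init.ora settings) and runs it via
# module.run_command(), failing the module on a non-zero return code.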
initparam = ' -initParams '
paramslist = ''
scriptlist = ''
command = "%s/bin/dbca -createDatabase -silent " % (oracle_home)
if responsefile != None:
if os.path.exists(responsefile):
command += ' -responseFile %s ' % (responsefile)
else:
msg='Responsefile %s doesn\'t exist' % (responsefile)
module.fail_json(msg=msg, changed=False)
if db_unique_name is not None:
initparam += 'db_name=%s,db_unique_name=%s,' % (db_name,db_unique_name)
if domain is not None:
initparam += 'db_domain=%s' % domain
if initparams is not None:
paramslist = ",".join(initparams)
initparam += '%s' % (paramslist)
command += ' -gdbName %s' % (db_name)
if sid != None:
command += ' -sid %s' % (sid)
if sys_password is not None:
command += ' -sysPassword \"%s\"' % (sys_password)
if system_password is not None:
command += ' -systemPassword \"%s\"' % (system_password)
else:
system_password = sys_password
command += ' -systemPassword \"%s\"' % (system_password)
if dbsnmp_password is not None:
command += ' -dbsnmpPassword \"%s\"' % (dbsnmp_password)
else:
dbsnmp_password = sys_password
command += ' -dbsnmpPassword \"%s\"' % (dbsnmp_password)
if dbconfig_type == 'RAC':
if nodelist != None:
nodelist = ",".join(nodelist)
command += ' -nodelist %s ' % (nodelist)
else:
command += ' -gdbName %s' % (db_name)
if sid != None:
command += ' -sid %s' % (sid)
if sys_password is not None:
command += ' -sysPassword \"%s\"' % (sys_password)
if system_password is not None:
command += ' -systemPassword \"%s\"' % (system_password)
else:
system_password = sys_password
command += ' -systemPassword \"%s\"' % (system_password)
if dbsnmp_password is not None:
command += ' -dbsnmpPassword \"%s\"' % (dbsnmp_password)
else:
dbsnmp_password = sys_password
command += ' -dbsnmpPassword \"%s\"' % (dbsnmp_password)
if template:
command += ' -templateName \"%s\"' % (template)
if major_version > '11.2':
if cdb == True:
command += ' -createAsContainerDatabase true '
if local_undo == True:
command += ' -useLocalUndoForPDBs true'
else:
command += ' -useLocalUndoForPDBs false'
else:
command += ' -createAsContainerDatabase false '
if datafile_dest != None:
command += ' -datafileDestination %s ' % (datafile_dest)
if recoveryfile_dest != None:
command += ' -recoveryAreaDestination %s ' % (recoveryfile_dest)
if storage_type != None:
command += ' -storageType %s ' % (storage_type)
if dbconfig_type != None:
if dbconfig_type == 'SI':
dbconfig_type = 'SINGLE'
if major_version == '12.2':
command += ' -databaseConfigType %s ' % (dbconfig_type)
elif major_version == '12.1':
command += ' -databaseConfType %s ' % (dbconfig_type)
if dbconfig_type == 'RACONENODE':
if racone_service is None:
racone_service = db_name+'_ronserv'
command += ' -RACOneNodeServiceName %s ' % (racone_service)
if characterset != None:
command += ' -characterSet %s ' % (characterset)
if memory_percentage != None:
command += ' -memoryPercentage %s ' % (memory_percentage)
if memory_totalmb != None:
command += ' -totalMemory %s ' % (memory_totalmb)
if dbconfig_type == 'RAC':
if nodelist != None:
nodelist = ",".join(nodelist)
command += ' -nodelist %s ' % (nodelist)
if db_type != None:
command += ' -databaseType %s ' % (db_type)
if amm != None:
if major_version == '12.2':
if amm == True:
command += ' -memoryMgmtType AUTO '
else:
command += ' -memoryMgmtType AUTO_SGA '
elif major_version == '12.1':
command += ' -automaticMemoryManagement %s ' % (str(amm).lower())
elif major_version == '11.2':
if amm == True:
command += ' -automaticMemoryManagement '
if customscripts is not None:
scriptlist = ",".join(customscripts)
command += ' -customScripts %s ' % (scriptlist)
if db_unique_name != None:
initparam += 'db_name=%s,db_unique_name=%s,' % (db_name,db_unique_name)
if initparams != None:
paramslist = ",".join(initparams)
initparam += ' %s' % (paramslist)
if initparam != ' -initParams ' or paramslist != "":
command += initparam
# msg = "command: %s" % (command)
# module.fail_json(msg=msg, changed=False)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
module.fail_json(msg=msg, changed=False)
else:
if output == 'short':
return True
else:
verbosemsg = 'STDOUT: %s, COMMAND: %s' % (stdout, command)
verboselist.append(verbosemsg)
return True,verboselist
# module.exit_json(msg=verbosemsg, changed=True)
# elif rc == 0 and datapatch:
# if run_datapatch(module, msg, oracle_home, db_name, db_unique_name, sys_password):
# return True
# else:
# return True
# def run_datapatch(module, msg, oracle_home, db_name, db_unique_name, sys_password):
#
# cursor = getconn(module,msg)
# sid_sql = 'select instance_name from v$instance'
# sid_ = execute_sql_get(module,msg,cursor,sid_sql)
# os.environ['ORACLE_SID'] = sid_[0][0]
#
# if major_version > '11.2':
# command = '%s/OPatch/datapatch -verbose' % (oracle_home)
# (rc, stdout, stderr) = module.run_command(command)
# if rc != 0:
# msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
# module.fail_json(msg=msg, changed=False)
# else:
# return True
# else:
# datapatch_sql = '''
# connect / as sysdba
# @?/rdbms/admin/catbundle.sql psu apply
# exit
# '''
# sqlplus_bin = '%s/bin/sqlplus' % (oracle_home)
# p = subprocess.Popen([sqlplus_bin,'/nolog'],stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,stderr=subprocess.PIPE)
# (stdout,stderr) = p.communicate(datapatch_sql.encode('utf-8'))
# rc = p.returncode
# if rc != 0:
# msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, datapatch_sql)
# module.fail_json(msg=msg, changed=False)
# else:
# return True
def remove_db (module, msg, oracle_home, db_name, sid, db_unique_name, sys_password):
cursor = getconn(module,msg)
israc = ''
israc_sql = 'select parallel,instance_name,host_name from v$instance'
israc_ = execute_sql_get(module,msg,cursor,israc_sql)
remove_db = ''
if gimanaged:
if db_unique_name is not None:
remove_db = db_unique_name
elif sid is not None and israc_[0][0] == 'YES':
remove_db = db_name
elif sid is not None and israc_[0][0] == 'NO':
remove_db = sid
else:
remove_db = db_name
else:
if sid is not None:
remove_db = sid
else:
remove_db = db_name
command = "%s/bin/dbca -deleteDatabase -silent -sourceDB %s -sysDBAUserName sys -sysDBAPassword %s" % (oracle_home, remove_db, sys_password)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Removal of database %s failed: %s' % (db_name, stdout)
module.fail_json(msg=msg, changed=False)
else:
if output == 'short':
return True
else:
msg = 'STDOUT: %s, COMMAND: %s' % (stdout, command)
module.exit_json(msg=msg, changed=True)
def ensure_db_state (module,msg,oracle_home,db_name,db_unique_name,sid, archivelog, force_logging,supplemental_logging,
flashback, default_tablespace_type,default_tablespace,default_temp_tablespace, timezone):
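# Compares the current archivelog, force logging, flashback, supplemental logging,
# default tablespace and timezone settings with the requested ones and applies any
# differences, restarting the database only for the changes that require it.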
cursor = getconn(module,msg)
israc = ''
archcomp = ''
flcomp = ''
fbcomp = ''
slcomp = ''
alterdb_sql = 'alter database'
propsql = "select lower(property_value) from database_properties where property_name in ('DEFAULT_TBS_TYPE','DEFAULT_PERMANENT_TABLESPACE','DEFAULT_TEMP_TABLESPACE') order by 1"
tzsql = "select lower(property_value) from database_properties where property_name = 'DBTIMEZONE'"
curr_time_zone = execute_sql_get(module,msg,cursor,tzsql)
def_tbs_type,def_tbs,def_temp_tbs = execute_sql_get(module,msg,cursor,propsql)
israc_sql = 'select parallel,instance_name,host_name from v$instance'
israc_ = execute_sql_get(module,msg,cursor,israc_sql)
instance_name = israc_[0][1]
host_name = israc_[0][2]
change_restart_sql = []
change_db_sql = []
log_check_sql = 'select log_mode,force_logging, flashback_on from v$database'
log_check_ = execute_sql_get(module,msg,cursor,log_check_sql)
if major_version >= '19.0':
supp_log_check_sql = 'select SUPPLEMENTAL_LOG_DATA_MIN,SUPPLEMENTAL_LOG_DATA_PL,SUPPLEMENTAL_LOG_DATA_SR,SUPPLEMENTAL_LOG_DATA_PK,SUPPLEMENTAL_LOG_DATA_UI from v$database'
supp_log_check_ = execute_sql_get(module,msg,cursor,supp_log_check_sql)
if supp_log_check_[0][0] != slcomp:
if supplemental_logging == True:
slcomp = 'YES'
slsql = alterdb_sql + ' add supplemental log data'
else:
slcomp = 'NO'
slsql = alterdb_sql + ' drop supplemental log data'
change_db_sql.append(slsql)
if israc_[0][0] == 'NO':
israc = False
else:
israc = True
if archivelog == True:
archcomp = 'ARCHIVELOG'
archsql = alterdb_sql + ' archivelog'
else:
archcomp = 'NOARCHIVELOG'
archsql = alterdb_sql + ' noarchivelog'
if force_logging == True:
flcomp = 'YES'
flsql = alterdb_sql + ' force logging'
else:
flcomp = 'NO'
flsql = alterdb_sql + ' no force logging'
if flashback == True:
fbcomp = 'YES'
fbsql = alterdb_sql + ' flashback on'
else:
fbcomp = 'NO'
fbsql = alterdb_sql + ' flashback off'
if def_tbs_type[0] != default_tablespace_type:
deftbstypesql = 'alter database set default %s tablespace ' % (default_tablespace_type)
change_db_sql.append(deftbstypesql)
if default_tablespace is not None and def_tbs[0] != default_tablespace:
deftbssql = 'alter database default tablespace %s' % (default_tablespace)
change_db_sql.append(deftbssql)
if default_temp_tablespace is not None and def_temp_tbs[0] != default_temp_tablespace:
deftempsql = 'alter database default temporary tablespace %s' % (default_temp_tablespace)
change_db_sql.append(deftempsql)
if timezone is not None and curr_time_zone[0][0] != timezone:
deftzsql = 'alter database set time_zone = \'%s\'' % (timezone)
change_db_sql.append(deftzsql)
if log_check_[0][0] != archcomp:
change_restart_sql.append(archsql)
if log_check_[0][1] != flcomp:
change_db_sql.append(flsql)
if log_check_[0][2] != fbcomp:
change_db_sql.append(fbsql)
if len(change_db_sql) > 0 or len(change_restart_sql) > 0:
if log_check_[0][0] == 'ARCHIVELOG' and log_check_[0][2] == 'YES' and not archivelog and not flashback: # Flashback database needs to be turned off before archivelog is turned off
if len(change_db_sql) > 0: # <- Apply changes that do not require a restart
apply_norestart_changes(module,msg,change_db_sql)
if len(change_restart_sql) > 0: # <- Apply changes that require a restart
apply_restart_changes(module,msg,oracle_home,db_name,db_unique_name,sid,instance_name,host_name,israc,archcomp,change_restart_sql)
else:
if len(change_restart_sql) > 0: # <- Apply changes that require a restart
apply_restart_changes(module,msg,oracle_home,db_name,db_unique_name,sid,instance_name,host_name,israc,archcomp,change_restart_sql)
if len(change_db_sql) > 0: # <- Apply changes that do not require a restart
apply_norestart_changes(module,msg,change_db_sql)
msg = ('Database %s has been put in the intended state - Archivelog: %s, Force Logging: %s, Flashback: %s, Supplemental Logging: %s, Timezone: %s' %
(db_name, archivelog,force_logging,flashback,supplemental_logging, timezone))
module.exit_json(msg=msg, changed=True)
else:
if newdb:
msg = 'Database %s successfully created (%s) ' % (db_name,archcomp)
if output == 'verbose':
msg += ' ,'.join(verboselist)
changed = True
else:
msg = ('Database %s already exists and is in the intended state - Archivelog: %s, Force Logging: %s, Flashback: %s, Supplemental Logging: %s, Timezone: %s' %
(db_name, archivelog,force_logging,flashback, supplemental_logging, timezone))
changed = False
module.exit_json(msg=msg, changed=changed)
def apply_restart_changes(module,msg,oracle_home,db_name,db_unique_name,sid,instance_name,host_name,israc,archcomp,change_restart_sql):
if stop_db(module,msg,oracle_home,db_name,db_unique_name,sid):
if start_instance(module,msg,oracle_home,db_name, db_unique_name,sid,'mount', instance_name, host_name,israc):
time.sleep(10) #<- To allow the DB to register with the listener
cursor = getconn(module,msg)
for sql in change_restart_sql:
execute_sql(module,msg,cursor,sql)
if stop_db(module,msg,oracle_home,db_name,db_unique_name,sid):
if start_db(module,msg,oracle_home,db_name,db_unique_name, sid):
if newdb:
msg = 'Database %s successfully created (%s) ' % (db_name, archcomp)
if output == 'verbose':
msg += ' ,'.join(verboselist)
changed = True
else:
msg = 'Database %s has been put in the intended state - (%s) ' % (db_name,archcomp)
if output == 'verbose':
msg += ' ,'.join(verboselist)
changed = True
def apply_norestart_changes(module,msg,change_db_sql):
cursor = getconn(module,msg)
for sql in change_db_sql:
execute_sql(module,msg,cursor,sql)
def stop_db (module, msg, oracle_home, db_name, db_unique_name, sid):
if gimanaged:
if db_unique_name is not None:
db_name = db_unique_name
command = '%s/bin/srvctl stop database -d %s -o immediate' % (oracle_home,db_name)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
module.fail_json(msg=msg, changed=False)
else:
return True
else:
if sid is not None:
os.environ['ORACLE_SID'] = sid
else:
os.environ['ORACLE_SID'] = db_name
shutdown_sql = '''
connect / as sysdba
shutdown immediate;
exit
'''
sqlplus_bin = '%s/bin/sqlplus' % (oracle_home)
p = subprocess.Popen([sqlplus_bin,'/nolog'],stdin=subprocess.PIPE,
stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(stdout,stderr) = p.communicate(shutdown_sql.encode('utf-8'))
rc = p.returncode
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, shutdown_sql)
module.fail_json(msg=msg, changed=False)
else:
return True
def start_db (module,msg, oracle_home, db_name, db_unique_name, sid):
if gimanaged:
if db_unique_name is not None:
db_name = db_unique_name
command = '%s/bin/srvctl start database -d %s' % (oracle_home,db_name)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
module.fail_json(msg=msg, changed=False)
else:
return True
else:
if sid is not None:
os.environ['ORACLE_SID'] = sid
else:
os.environ['ORACLE_SID'] = db_name
startup_sql = '''
connect / as sysdba
startup;
exit
'''
sqlplus_bin = '%s/bin/sqlplus' % (oracle_home)
p = subprocess.Popen([sqlplus_bin,'/nolog'],stdin=subprocess.PIPE,
stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(stdout,stderr) = p.communicate(startup_sql.encode('utf-8'))
rc = p.returncode
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, startup_sql)
module.fail_json(msg=msg, changed=False)
else:
return True
def start_instance (module,msg, oracle_home, db_name, db_unique_name,sid, open_mode, instance_name, host_name, israc):
if gimanaged:
if db_unique_name is not None:
db_name = db_unique_name
if israc:
command = '%s/bin/srvctl start instance -d %s -i %s' % (oracle_home, db_name, instance_name)
else:
command = '%s/bin/srvctl start database -d %s ' % (oracle_home, db_name)
if open_mode is not None:
command += ' -o %s ' % (open_mode)
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
module.fail_json(msg=msg, changed=False)
else:
return True
else:
if sid is not None:
os.environ['ORACLE_SID'] = sid
else:
os.environ['ORACLE_SID'] = db_name
startup_sql = '''
connect / as sysdba
startup mount;
exit
'''
sqlplus_bin = '%s/bin/sqlplus' % (oracle_home)
p = subprocess.Popen([sqlplus_bin,'/nolog'],stdin=subprocess.PIPE,
stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(stdout,stderr) = p.communicate(startup_sql.encode('utf-8'))
rc = p.returncode
if rc != 0:
msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, startup_sql)
module.fail_json(msg=msg, changed=False)
else:
return True
def execute_sql_get(module, msg, cursor, sql):
try:
cursor.execute(sql)
result = (cursor.fetchall())
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Something went wrong while executing sql_get - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return False
return result
def execute_sql(module, msg, cursor, sql):
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Something went wrong while executing sql - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return False
return True
def getconn(module,msg):
hostname = os.uname()[1]
wallet_connect = '/@%s' % service_name
try:
if (not user and not password ): # If neither user or password is supplied, the use of an oracle wallet is assumed
connect = wallet_connect
conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA)
elif (user and password ):
dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name, )
connect = dsn
conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA)
elif (not(user) or not(password)):
module.fail_json(msg='Missing username or password for cx_Oracle')
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Could not connect to database - %s, connect descriptor: %s' % (error.message, connect)
module.fail_json(msg=msg, changed=False)
cursor = conn.cursor()
return cursor
def main():
msg = ['']
cursor = None
global gimanaged
global major_version
global user
global password
global service_name
global hostname
global port
global israc
global newdb
global output
global verbosemsg
global verboselist
global domain
verbosemsg = ''
verboselist = []
newdb = False
module = AnsibleModule(
argument_spec = dict(
oracle_home = dict(default=None, aliases = ['oh']),
db_name = dict(required=True, aliases = ['db','database_name','name']),
sid = dict(required=False),
db_unique_name = dict(required=False, aliases = ['dbunqn','unique_name']),
sys_password = dict(required=False, no_log=True, aliases = ['syspw','sysdbapassword','sysdbapw']),
system_password = dict(required=False, no_log=True, aliases = ['systempw']),
dbsnmp_password = dict(required=False, no_log=True, aliases = ['dbsnmppw']),
responsefile = dict(required=False),
template = dict(default='General_Purpose.dbc'),
cdb = dict(default=False, type='bool', aliases= ['container']),
local_undo = dict(default=True, type='bool'),
datafile_dest = dict(required=False, aliases= ['dfd']),
recoveryfile_dest = dict(required=False, aliases= ['rfd']),
storage_type = dict(default='FS', aliases= ['storage'],choices = ['FS','ASM']),
dbconfig_type = dict(default='SI',choices = ['SI','RAC','RACONENODE']),
db_type = dict(default='MULTIPURPOSE',choices = ['MULTIPURPOSE','DATA_WAREHOUSING','OLTP']),
racone_service = dict(required=False,aliases = ['ron_service']),
characterset = dict(default='AL32UTF8'),
memory_percentage = dict(required=False),
memory_totalmb = dict(default='1024'),
nodelist = dict(required=False, type='list'),
amm = dict(default=False, type='bool', aliases = ['automatic_memory_management']),
initparams = dict(required=False, type='list'),
customscripts = dict(required=False, type='list'),
default_tablespace_type = dict(default='smallfile',choices = ['smallfile','bigfile']),
default_tablespace = dict(required=False),
default_temp_tablespace = dict(required=False),
archivelog = dict(default=False, type='bool'),
force_logging = dict(default=False, type='bool'),
supplemental_logging = dict(default=False, type='bool'),
flashback = dict(default=False, type='bool'),
datapatch = dict(default=True, type='bool'),
domain = dict(required=False),
timezone = dict(required=False),
output = dict(default="short", choices = ["short","verbose"]),
state = dict(default="present", choices = ["present", "absent", "started"]),
hostname = dict(required=False, default = 'localhost', aliases = ['host']),
port = dict(required=False, default = 1521),
),
mutually_exclusive=[['memory_percentage', 'memory_totalmb']]
)
oracle_home = module.params["oracle_home"]
db_name = module.params["db_name"]
sid = module.params["sid"]
db_unique_name = module.params["db_unique_name"]
sys_password = module.params["sys_password"]
system_password = module.params["system_password"]
dbsnmp_password = module.params["dbsnmp_password"]
responsefile = module.params["responsefile"]
template = module.params["template"]
cdb = module.params["cdb"]
local_undo = module.params["local_undo"]
datafile_dest = module.params["datafile_dest"]
recoveryfile_dest = module.params["recoveryfile_dest"]
storage_type = module.params["storage_type"]
dbconfig_type = module.params["dbconfig_type"]
racone_service = module.params["racone_service"]
characterset = module.params["characterset"]
memory_percentage = module.params["memory_percentage"]
memory_totalmb = module.params["memory_totalmb"]
nodelist = module.params["nodelist"]
db_type = module.params["db_type"]
amm = module.params["amm"]
initparams = module.params["initparams"]
customscripts = module.params["customscripts"]
default_tablespace_type = module.params["default_tablespace_type"]
default_tablespace = module.params["default_tablespace"]
default_temp_tablespace = module.params["default_temp_tablespace"]
archivelog = module.params["archivelog"]
force_logging = module.params["force_logging"]
supplemental_logging = module.params["supplemental_logging"]
flashback = module.params["flashback"]
datapatch = module.params["datapatch"]
domain = module.params["domain"]
timezone = module.params["timezone"]
output = module.params["output"]
state = module.params["state"]
hostname = module.params["hostname"]
port = module.params["port"]
#ld_library_path = '%s/lib' % (oracle_home)
if oracle_home is not None:
os.environ['ORACLE_HOME'] = oracle_home.rstrip('/')
#os.environ['LD_LIBRARY_PATH'] = ld_library_path
elif 'ORACLE_HOME' in os.environ:
oracle_home = os.environ['ORACLE_HOME']
#ld_library_path = os.environ['LD_LIBRARY_PATH']
else:
msg = 'ORACLE_HOME variable not set. Please set it and re-run the command'
module.fail_json(msg=msg, changed=False)
if not cx_oracle_exists:
msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set"
module.fail_json(msg=msg)
# Decide whether to use srvctl or sqlplus
if os.path.exists('/etc/oracle/olr.loc'):
gimanaged = True
else:
gimanaged = False
if not cx_oracle_exists:
msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set"
module.fail_json(msg=msg)
# Connection details for database
user = 'sys'
password = sys_password
if db_unique_name is not None:
service_name = db_unique_name
else:
service_name = db_name
if domain is not None and domain != '':
service_name = "%s.%s" % (service_name,domain)
# Get the Oracle version
major_version = get_version(module,msg,oracle_home)
if state == 'started':
msg = "oracle_home: %s db_name: %s sid: %s db_unique_name: %s" % (oracle_home, db_name, sid, db_unique_name)
if not check_db_exists(module, msg, oracle_home,db_name, sid, db_unique_name):
msg = "Database not found. %s" % msg
module.fail_json(msg=msg, changed=False)
else:
if start_db (module, msg, oracle_home, db_name, db_unique_name, sid):
msg = "Database started."
module.exit_json(msg=msg, changed=True)
else:
msg = "Startup failed. %s" % msg
module.fail_json(msg=msg, changed=False)
elif state == 'present':
if not check_db_exists(module, msg, oracle_home,db_name, sid, db_unique_name):
if create_db(module, msg, oracle_home, sys_password, system_password, dbsnmp_password, db_name, sid, db_unique_name, responsefile, template, cdb, local_undo, datafile_dest, recoveryfile_dest,
storage_type, dbconfig_type, racone_service, characterset, memory_percentage, memory_totalmb, nodelist, db_type, amm, initparams, customscripts,datapatch, domain):
newdb = True
ensure_db_state (module,msg,oracle_home,db_name,db_unique_name,sid, archivelog, force_logging, supplemental_logging, flashback, default_tablespace_type,default_tablespace,default_temp_tablespace, timezone)
else:
module.fail_json(msg=msg, changed=False)
else:
ensure_db_state (module,msg,oracle_home,db_name,db_unique_name,sid, archivelog, force_logging, supplemental_logging, flashback, default_tablespace_type,default_tablespace,default_temp_tablespace, timezone)
# msg = 'Database %s already exists' % (db_name)
# module.exit_json(msg=msg, changed=False)
elif state == 'absent':
if check_db_exists(module, msg, oracle_home, db_name, sid, db_unique_name):
if remove_db(module, msg, oracle_home, db_name, sid, db_unique_name, sys_password):
msg = 'Successfully removed database %s' % (db_name)
module.exit_json(msg=msg, changed=True)
else:
module.fail_json(msg=msg, changed=False)
else:
msg = 'Database %s doesn\'t exist' % (db_name)
module.exit_json(msg=msg, changed=False)
module.exit_json(msg="Unhandled exit", changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
[] |
[] |
[
"ORACLE_SID",
"ORACLE_HOME",
"LD_LIBRARY_PATH"
] |
[]
|
["ORACLE_SID", "ORACLE_HOME", "LD_LIBRARY_PATH"]
|
python
| 3 | 0 | |
core/deployment/src/main/java/io/quarkus/deployment/pkg/steps/NativeImageBuildStep.java
|
package io.quarkus.deployment.pkg.steps;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.jboss.logging.Logger;
import io.quarkus.bootstrap.util.IoUtils;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.substrate.SubstrateSystemPropertyBuildItem;
import io.quarkus.deployment.pkg.NativeConfig;
import io.quarkus.deployment.pkg.PackageConfig;
import io.quarkus.deployment.pkg.builditem.ArtifactResultBuildItem;
import io.quarkus.deployment.pkg.builditem.NativeImageBuildItem;
import io.quarkus.deployment.pkg.builditem.NativeImageSourceJarBuildItem;
import io.quarkus.deployment.pkg.builditem.OutputTargetBuildItem;
public class NativeImageBuildStep {
private static final Logger log = Logger.getLogger(NativeImageBuildStep.class);
private static final String DEBUG_BUILD_PROCESS_PORT = "5005";
private static final String GRAALVM_HOME = "GRAALVM_HOME";
/**
* Name of the <em>system</em> property to retrieve JAVA_HOME
*/
private static final String JAVA_HOME_SYS = "java.home";
/**
* Name of the <em>environment</em> variable to retrieve JAVA_HOME
*/
private static final String JAVA_HOME_ENV = "JAVA_HOME";
private static final boolean IS_LINUX = System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("linux");
private static final boolean IS_WINDOWS = System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows");
/**
* The name of the environment variable containing the system path.
*/
private static final String PATH = "PATH";
@BuildStep(onlyIf = NativeBuild.class)
ArtifactResultBuildItem result(NativeImageBuildItem image) {
return new ArtifactResultBuildItem(image.getPath(), PackageConfig.NATIVE, Collections.emptyMap());
}
@BuildStep
public NativeImageBuildItem build(NativeConfig nativeConfig, NativeImageSourceJarBuildItem nativeImageSourceJarBuildItem,
OutputTargetBuildItem outputTargetBuildItem,
PackageConfig packageConfig,
List<SubstrateSystemPropertyBuildItem> substrateProperties) {
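// Assembles the native-image command line (either a local GraalVM/JDK executable or a
// containerized builder image), appends the flags derived from NativeConfig and the
// collected system properties, runs the build and copies the produced executable into
// the output directory.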
Path runnerJar = nativeImageSourceJarBuildItem.getPath();
log.info("Building native image from " + runnerJar);
Path outputDir = nativeImageSourceJarBuildItem.getPath().getParent();
final String runnerJarName = runnerJar.getFileName().toString();
boolean vmVersionOutOfDate = isThisGraalVMVersionObsolete();
HashMap<String, String> env = new HashMap<>(System.getenv());
List<String> nativeImage;
String noPIE = "";
if (!"".equals(nativeConfig.containerRuntime) || nativeConfig.containerBuild) {
String containerRuntime = nativeConfig.containerRuntime.isEmpty() ? "docker" : nativeConfig.containerRuntime;
// E.g. "/usr/bin/docker run -v {{PROJECT_DIR}}:/project --rm quarkus/graalvm-native-image"
nativeImage = new ArrayList<>();
Collections.addAll(nativeImage, containerRuntime, "run", "-v",
outputDir.toAbsolutePath() + ":/project:z");
if (IS_LINUX) {
if ("docker".equals(containerRuntime)) {
String uid = getLinuxID("-ur");
String gid = getLinuxID("-gr");
if (uid != null && gid != null && !"".equals(uid) && !"".equals(gid)) {
Collections.addAll(nativeImage, "--user", uid + ":" + gid);
}
} else if ("podman".equals(containerRuntime)) {
// Needed to avoid AccessDeniedExceptions
nativeImage.add("--userns=keep-id");
}
}
nativeImage.addAll(nativeConfig.containerRuntimeOptions);
if (nativeConfig.debugBuildProcess && nativeConfig.publishDebugBuildProcessPort) {
// publish the debug port onto the host if asked for
nativeImage.add("--publish=" + DEBUG_BUILD_PROCESS_PORT + ":" + DEBUG_BUILD_PROCESS_PORT);
}
Collections.addAll(nativeImage, "--rm", nativeConfig.builderImage);
} else {
if (IS_LINUX) {
noPIE = detectNoPIE();
}
String graal = nativeConfig.graalvmHome;
File java = nativeConfig.javaHome;
if (graal != null) {
env.put(GRAALVM_HOME, graal);
} else {
graal = env.get(GRAALVM_HOME);
}
if (java == null) {
// try system property first - it will be the JAVA_HOME used by the current JVM
String home = System.getProperty(JAVA_HOME_SYS);
if (home == null) {
// No luck, a somewhat odd JVM not enforcing this property
// try with the JAVA_HOME environment variable
home = env.get(JAVA_HOME_ENV);
}
if (home != null) {
java = new File(home);
}
}
nativeImage = Collections.singletonList(getNativeImageExecutable(graal, java, env).getAbsolutePath());
}
try {
List<String> command = new ArrayList<>(nativeImage);
if (nativeConfig.cleanupServer) {
List<String> cleanup = new ArrayList<>(nativeImage);
cleanup.add("--server-shutdown");
ProcessBuilder pb = new ProcessBuilder(cleanup.toArray(new String[0]));
pb.directory(outputDir.toFile());
pb.redirectInput(ProcessBuilder.Redirect.INHERIT);
pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
pb.redirectError(ProcessBuilder.Redirect.INHERIT);
Process process = pb.start();
process.waitFor();
}
Boolean enableSslNative = false;
for (SubstrateSystemPropertyBuildItem prop : substrateProperties) {
//todo: this should be specific build items
if (prop.getKey().equals("quarkus.ssl.native") && prop.getValue() != null) {
enableSslNative = Boolean.parseBoolean(prop.getValue());
} else if (prop.getKey().equals("quarkus.jni.enable") && prop.getValue() != null) {
nativeConfig.enableJni |= Boolean.parseBoolean(prop.getValue());
} else if (prop.getKey().equals("quarkus.native.enable-all-security-services") && prop.getValue() != null) {
nativeConfig.enableAllSecurityServices |= Boolean.parseBoolean(prop.getValue());
} else if (prop.getKey().equals("quarkus.native.enable-all-charsets") && prop.getValue() != null) {
nativeConfig.addAllCharsets |= Boolean.parseBoolean(prop.getValue());
} else {
// todo maybe just -D is better than -J-D in this case
if (prop.getValue() == null) {
command.add("-J-D" + prop.getKey());
} else {
command.add("-J-D" + prop.getKey() + "=" + prop.getValue());
}
}
}
if (enableSslNative) {
nativeConfig.enableHttpsUrlHandler = true;
nativeConfig.enableJni = true;
nativeConfig.enableAllSecurityServices = true;
}
if (nativeConfig.additionalBuildArgs != null) {
command.addAll(nativeConfig.additionalBuildArgs);
}
command.add("--initialize-at-build-time=");
command.add("-H:InitialCollectionPolicy=com.oracle.svm.core.genscavenge.CollectionPolicy$BySpaceAndTime"); //the default collection policy results in full GC's 50% of the time
command.add("-jar");
command.add(runnerJarName);
//https://github.com/oracle/graal/issues/660
command.add("-J-Djava.util.concurrent.ForkJoinPool.common.parallelism=1");
if (nativeConfig.enableFallbackImages) {
command.add("-H:FallbackThreshold=5");
} else {
//Default: be strict as those fallback images aren't very useful
//and tend to cover up real problems.
command.add("-H:FallbackThreshold=0");
}
if (nativeConfig.reportErrorsAtRuntime) {
command.add("-H:+ReportUnsupportedElementsAtRuntime");
}
if (nativeConfig.reportExceptionStackTraces) {
command.add("-H:+ReportExceptionStackTraces");
}
if (nativeConfig.debugSymbols) {
command.add("-g");
}
if (nativeConfig.debugBuildProcess) {
command.add("-J-Xrunjdwp:transport=dt_socket,address=" + DEBUG_BUILD_PROCESS_PORT + ",server=y,suspend=y");
}
if (nativeConfig.enableReports) {
command.add("-H:+PrintAnalysisCallTree");
}
if (nativeConfig.dumpProxies) {
command.add("-Dsun.misc.ProxyGenerator.saveGeneratedFiles=true");
if (nativeConfig.enableServer) {
log.warn(
"Options dumpProxies and enableServer are both enabled: this will get the proxies dumped in an unknown external working directory");
}
}
if (nativeConfig.nativeImageXmx.isPresent()) {
command.add("-J-Xmx" + nativeConfig.nativeImageXmx.get());
}
List<String> protocols = new ArrayList<>(2);
if (nativeConfig.enableHttpUrlHandler) {
protocols.add("http");
}
if (nativeConfig.enableHttpsUrlHandler) {
protocols.add("https");
}
if (nativeConfig.addAllCharsets) {
command.add("-H:+AddAllCharsets");
} else {
command.add("-H:-AddAllCharsets");
}
if (!protocols.isEmpty()) {
command.add("-H:EnableURLProtocols=" + String.join(",", protocols));
}
if (nativeConfig.enableAllSecurityServices) {
command.add("--enable-all-security-services");
}
if (!noPIE.isEmpty()) {
command.add("-H:NativeLinkerOption=" + noPIE);
}
if (nativeConfig.enableRetainedHeapReporting) {
command.add("-H:+PrintRetainedHeapHistogram");
}
if (nativeConfig.enableCodeSizeReporting) {
command.add("-H:+PrintCodeSizeReport");
}
if (!nativeConfig.enableIsolates) {
command.add("-H:-SpawnIsolates");
}
if (nativeConfig.enableJni) {
command.add("-H:+JNI");
} else {
command.add("-H:-JNI");
}
if (!nativeConfig.enableServer && !IS_WINDOWS) {
command.add("--no-server");
}
if (nativeConfig.enableVmInspection) {
command.add("-H:+AllowVMInspection");
}
if (nativeConfig.autoServiceLoaderRegistration) {
command.add("-H:+UseServiceLoaderFeature");
//When enabling, at least print what exactly is being added:
command.add("-H:+TraceServiceLoaderFeature");
} else {
command.add("-H:-UseServiceLoaderFeature");
}
if (nativeConfig.fullStackTraces) {
command.add("-H:+StackTrace");
} else {
command.add("-H:-StackTrace");
}
String executableName = outputTargetBuildItem.getBaseName() + packageConfig.runnerSuffix;
command.add(executableName);
log.info(String.join(" ", command));
CountDownLatch errorReportLatch = new CountDownLatch(1);
ProcessBuilder pb = new ProcessBuilder(command.toArray(new String[0]));
pb.directory(outputDir.toFile());
pb.redirectInput(ProcessBuilder.Redirect.INHERIT);
pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
Process process = pb.start();
new Thread(new ErrorReplacingProcessReader(process.getErrorStream(), outputDir.resolve("reports").toFile(),
errorReportLatch)).start();
errorReportLatch.await();
if (process.waitFor() != 0) {
throw new RuntimeException("Image generation failed");
}
Path generatedImage = outputDir.resolve(executableName);
IoUtils.copy(generatedImage,
outputTargetBuildItem.getOutputDirectory().resolve(executableName));
Files.delete(generatedImage);
String finalPath = outputTargetBuildItem.getBaseName();
System.setProperty("native.image.path", finalPath);
return new NativeImageBuildItem(Paths.get(finalPath));
} catch (Exception e) {
throw new RuntimeException("Failed to build native image", e);
}
}
//FIXME remove after transition period
private boolean isThisGraalVMVersionObsolete() {
final String vmName = System.getProperty("java.vm.name");
log.info("Running Quarkus native-image plugin on " + vmName);
final List<String> obsoleteGraalVmVersions = Arrays.asList("1.0.0", "19.0.", "19.1.");
final boolean vmVersionIsObsolete = obsoleteGraalVmVersions.stream().anyMatch(vmName::contains);
if (vmVersionIsObsolete) {
log.error("Out of date build of GraalVM detected! Please upgrade to GraalVM 19.2.0.");
return true;
}
return false;
}
private static File getNativeImageExecutable(String graalVmHome, File javaHome, Map<String, String> env) {
String imageName = IS_WINDOWS ? "native-image.cmd" : "native-image";
if (graalVmHome != null) {
File file = Paths.get(graalVmHome, "bin", imageName).toFile();
if (file.exists()) {
return file;
}
}
if (javaHome != null) {
File file = new File(javaHome, "bin/" + imageName);
if (file.exists()) {
return file;
}
}
// System path
String systemPath = env.get(PATH);
if (systemPath != null) {
String[] pathDirs = systemPath.split(File.pathSeparator);
for (String pathDir : pathDirs) {
File dir = new File(pathDir);
if (dir.isDirectory()) {
File file = new File(dir, imageName);
if (file.exists()) {
return file;
}
}
}
}
throw new RuntimeException("Cannot find the `" + imageName + "` in the GRAALVM_HOME, JAVA_HOME and System " +
"PATH. Install it using `gu install native-image`");
}
private static String getLinuxID(String option) {
Process process;
try {
StringBuilder responseBuilder = new StringBuilder();
String line;
ProcessBuilder idPB = new ProcessBuilder().command("id", option);
idPB.redirectError(new File("/dev/null"));
idPB.redirectInput(new File("/dev/null"));
process = idPB.start();
try (InputStream inputStream = process.getInputStream()) {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
while ((line = reader.readLine()) != null) {
responseBuilder.append(line);
}
safeWaitFor(process);
return responseBuilder.toString();
}
} catch (Throwable t) {
safeWaitFor(process);
throw t;
}
} catch (IOException e) { //from process.start()
//swallow and return null id
return null;
}
}
static void safeWaitFor(Process process) {
boolean intr = false;
try {
for (;;)
try {
process.waitFor();
return;
} catch (InterruptedException ex) {
intr = true;
}
} finally {
if (intr)
Thread.currentThread().interrupt();
}
}
private static String detectNoPIE() {
String argument = testGCCArgument("-no-pie");
return argument.length() == 0 ? testGCCArgument("-nopie") : argument;
}
private static String testGCCArgument(String argument) {
try {
Process gcc = new ProcessBuilder("cc", "-v", "-E", argument, "-").start();
gcc.getOutputStream().close();
if (gcc.waitFor() == 0) {
return argument;
}
} catch (IOException | InterruptedException e) {
// eat
}
return "";
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
twitter_countryGeo/twitter-geo/etool/queue.py
|
#!/usr/bin/env python
import sys
import json
import re
import logging
import os
import os.path
import codecs
import time
import datetime
import conf
import logs
import kqueue
log = logging.getLogger(__name__)
# constant to select bind() for attaching the socket
BIND = 1
# constant to select connect() for attaching the socket
CONNECT = 2
SERVICE = ""
INITTED = False
KCONNECTION = None
def init(args=None):
# init logger
# load/get the config
# eventually this needs a search path for the config
# should be env(QFU_CONFIG);./queue.conf;/etc/embers/queue.conf;tcp://localhost:3473
# use 3473 as the global control channel
global SERVICE, INITTED
cf = None
conf.init(args)
if args and args.service:
SERVICE = args.service
else:
SERVICE = os.environ.get('UPSTART_JOB', "")
INITTED = True
def connect(force_new=False):
global KCONNECTION
if force_new:
return kqueue.connect()
else:
if not KCONNECTION:
KCONNECTION = kqueue.connect()
return KCONNECTION
class JsonMarshal(object):
def __init__(self, encoding='utf8', **kw):
# raises an error if you get a bogus encoding
codecs.lookup(encoding)
self.encoding = encoding
self.remove_newline = kw.get('remove_newline', False)
def encode(self, obj):
msg = json.dumps(obj, encoding=self.encoding, ensure_ascii=False)
# U+0085(Next Line), U+2028(Line Separator), U+2029(Paragraph Separator)
if self.remove_newline:
msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+', ur'\\n', msg)
#msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+|\\n|\\r|\\f|\\v', '\\n', msg)
#msg = msg.replace("…", '')
if isinstance(msg, str):
msg = unicode(msg)
return msg
def decode(self, data):
return json.loads(data, encoding=self.encoding)
#def send(self, socket, data, flags=0):
# socket.send_unicode(data, encoding=self.encoding, flags=flags)
#def recv(self, socket, flags=0):
# b = socket.recv(flags=flags)
# return unicode(b, encoding=self.encoding, errors='replace')
class UnicodeMarshal(JsonMarshal):
def __init__(self, **kw):
super(UnicodeMarshal, self).__init__(**kw)
def encode(self, obj):
return unicode(obj)
def decode(self, data):
# exception if this is not decodeable (str, stream etc.)
return unicode(data)
# send and recv are handled in JsonMarshall
class RawMarshal(object):
def encode(self, obj):
return obj
def decode(self, obj):
return obj
#def send(self, socket, data, flags=0):
# if isinstance(data, unicode):
# socket.send_unicode(data, flags)
# else:
# socket.send(data, flags=flags)
#def recv(self, socket, flags=0):
# return socket.recv(flags=flags)
class StreamCaptureProbe(object):
def __init__(self, encoding='utf8', stream=sys.stdout):
self._s = codecs.getwriter(encoding)(stream)
self._s.flush() # make sure its good
def __call__(self, action, message):
if action == Queue.SENT:
self._s.write(message)
self._s.write('\n')
self._s.flush()
class QueueStatsProbe(object):
def __init__(self, interval_min=5):
self.interval = datetime.timedelta(minutes=interval_min)
self.start = datetime.datetime.now()
self.sent_bytes = 0
self.sent_msg = 0
self.recv_bytes = 0
self.recv_msg = 0
def __call__(self, action, message):
if action == Queue.SENT:
self.sent_bytes += len(message)
self.sent_msg += 1
if action == Queue.RECEIVED:
self.recv_bytes += len(message)
self.recv_msg += 1
# TODO - if delta past period report the stats
class Queue(object):
"""Docstring for Queue """
SENT = 1
RECEIVED = 2
def __init__(self, ename, mode, qname="", no_ack=True, capture=False,
remove_newline=False, marshal=None, force_new_connection=False):
"""@todo: to be defined
:param ename: @todo
:param mode: @todo
:param qname: @todo
:param no_ack: @todo
:param capture: @todo
:param remove_newline: @todo
"""
if not INITTED:
log.warn("QUEUE INIT Not called, calling")
init()
self._ename = ename
self._mode = mode
self._qname = qname
self._no_ack = no_ack
self._probes = [] # probes for tracing events
self._last_poll = None
self._marshal = marshal or JsonMarshal()
self.connection = connect(force_new_connection)
if not isinstance(self._ename, list):
self._ename = [self._ename]
exclusive = (SERVICE == "")
self._exchanges = [kqueue.Exchange(e[0], type="fanout", durable=False) for e in self._ename]
self._queues = [kqueue.Queue(e[1], ex, exclusive=exclusive)
for e, ex in zip(self._ename, self._exchanges)]
self._name = [e[0] for e in self._ename]
def open(self):
"""@todo: Docstring for open
:returns: @todo
"""
if not INITTED:
init()
if "r" in self._mode:
self._queue = kqueue.KReadQueue(self.connection,
self._queues,
no_ack=self._no_ack,
queue_declare=True)
elif "w" in self._mode:
self._queue = kqueue.KWriteQueue(self.connection,
self._queues[0],
exchange_declare=True)
def read(self):
"""Reads one message from the queue
:returns: @todo
"""
if self._last_poll is not None:
msg = self._last_poll
self._last_poll = None
else:
msg = self._queue.get(block=True)
msg = msg.payload
self.notify(Queue.RECEIVED, msg)
msg = self._marshal.decode(msg)
return msg
def read_without_polling(self):
"""Reads socket without first polling it, guaranteed block if no data
exists.
:returns: @todo
"""
return self.read()
def poll(self, timeout=None, flags=0):
if self._last_poll is not None:
return True
else:
try:
msg = self._queue.get(block=True, timeout=timeout)
except kqueue.Empty:
msg = None
self._last_poll = msg
return self._last_poll is not None
def write(self, data):
"""@todo: Docstring for write
:param data: @todo
:returns: @todo
"""
data = self._marshal.encode(data)
self._queue.put(data)
self.notify(Queue.SENT, data)
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
return ",".join(self._name)
# be an iterator
# http://docs.python.org/library/stdtypes.html#iterator-types
def __iter__(self):
return self
def next(self):
return self.read()
# support contextmanager
# see http://docs.python.org/library/stdtypes.html#context-manager-types
# with queue.open(...) as q: ...
def __enter__(self):
return self
def __exit__(self, ex_type, ex_val, ex_trace):
self.close()
# tell any open control channels we are exiting
return False
def close(self):
"""@todo: Docstring for close
:returns: @todo
"""
pass
# probes for tracing messages
# this is how you can do dumps of messages as they are read/written
# and stuff like collecting metrics on messages
def add_probe(self, probe):
assert hasattr(probe, '__call__'), "Object must be callable."
self._probes.append(probe)
def notify(self, action, msg):
for p in self._probes:
try:
p(action, json.dumps(msg))
except KeyboardInterrupt:
raise
except:
log.exception('Failed to notify probe.')
class StreamQueue(object):
"""
An object to make a stream (typically stdin or stdout)
conform to the Queue interface so we can write code that treats
them interchangeably.
"""
def __init__(self, stream,
mode='r',
name=None,
encoding='utf8',
marshal=JsonMarshal(),
end_of_record='\n',
**ignore):
assert stream, "Need to a stream to read or write to."
assert marshal, "Need a message marshaller to encode and decode messages."
self._marshal = marshal
self.end_of_record = end_of_record
if encoding:
if mode == 'w':
self._stream = codecs.getwriter(encoding)(stream, 'replace')
else: # default read
self._stream = codecs.getreader(encoding)(stream, 'replace')
else: # accept what they give you
self._stream = stream
if not name:
self._name = None
else:
self._name = name
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
l = len(self._name)
if l == 1:
return self._name[0]
elif l > 1:
sout = self._name[0]
for i in range(1, l):
sout = sout + "," + self._name[i]
return sout
else:
return None
def poll(self, timeout=None, flags=0): # zmq.POLLIN):
raise NotImplementedError
def read(self, flags=0):
"""Read the next item from the stream.
This deals with blank lines and EOF by passing
on the values from the stream's read(). Blanks lines
are a string with a newline (and maybe other whitespace)
and EOF is returned as ''. I.e. not s.read() => EOF.
"""
msg = self._stream.readline()
if msg.strip(): # skip empty lines
return self._marshal.decode(msg)
else: # pass it on - blank line is '\n', EOF is ''
return msg
def write(self, obj, flags=0):
if not obj:
return
msg = self._marshal.encode(obj).strip()
self._stream.write(msg)
self._stream.write(self.end_of_record)
def __iter__(self):
self._iter = self._stream.__iter__()
return self
def next(self):
if self._iter:
msg = self._iter.next()
if msg.strip(): # skip blank lines
return self._marshal.decode(msg)
else:
return msg
else:
raise Exception('No iterator initialized')
def close(self): # No action necessary. Stubbed so this class can follow the usage patterns of other I/O classes
return
def __enter__(self):
self._ctx = self._stream.__enter__()
return self._ctx
def __exit__(self, ex_type, ex_val, ex_trace):
if self._ctx:
return self._ctx.__exit__(ex_type, ex_val, ex_trace)
else:
return False
def resolve_address(qname, qtype="r", attach=None):
"""
Resolve qname into a queue specification,
either from embers.conf or by treating it as a
fully qualified name if it is not in the conf.
Minimal check on form of fully qualified name.
The attach parameter overrides the default attachment type
(BIND or CONNECT) for queues doing special connections.
"""
#(host, port) = conf.get_queue_info(qname)
if qtype in ("w", ): # (zmq.PUB, zmq.REP):
result = (qname, "")
elif qtype in ("r", ):
result = (qname, SERVICE)
else:
assert False, "Invalid type, Queue no longer supports zmq"
return result
def get_conf_entry(qname):
"""
Return the entire JSON expression for a given qname.
"""
return conf.get_conf_entry(qname)
def open(name, mode='r', capture=False, service=None, exclusive=None, **kw):
"""
Open a queue with file-like semantics. E.g.:
q = open('sample-1', 'w') - publish
q = open('sample-1', 'r') - subscribe
options:
name - a queue name, either a full ZMQ-style URL or a name found in queue.conf
mode - the queue open mode. One of r (SUB), w (PUB), r+ (REP), w+ (REQ).
marshal - class to use to marshal messages, default JsonMarshal
capture - capture and log messages as they are sent. Can be True, or a stream, or a Capture instance.
"""
# this is somewhat goofy, but once you have
# a metaphor you might as well run it into the ground
assert mode in {"r", "w"}, 'Mode %s is not a valid mode. Use one of r, w'
typ = mode
service = service or SERVICE
# special case '-' -> use stdin or stdout
if isinstance(name, list) and '-' in name or name == '-':
if mode in ('w', ):
s = sys.stdout
name = 'stdout'
else:
s = sys.stdin
name = 'stdin'
log.info('Reading from stdin' if name == 'stdin' else 'Writing to stdout')
return StreamQueue(s, name=name, mode=mode, **kw)
# normal queue case
if typ in ("w", ):
if not name:
name = conf.get_default_queue_names(service, 'out')
log.info('Writing to %s' % name)
else:
if not name:
name = conf.get_default_queue_names(service, 'in')
log.info('Reading from %s' % name)
if isinstance(name, basestring):
addr = [resolve_address(name,
qtype=typ,
attach=kw.get('attach', None))]
else:
addr = [resolve_address(n,
qtype=typ,
attach=kw.get('attach', None))
for n in name]
if "qname" in kw:
qname = kw["qname"]
addr = [(e[0], qname) for e in addr]
result = Queue(addr, typ, **kw)
assert addr, "Could not resolve an address from %s." % (name,)
result.open()
if capture:
result.add_probe(StreamCaptureProbe())
return result
def main():
"""
A little utility to handle reading and writing streams
to and from a queue.
--pub <queue> : publish what's read from stdin to <queue>
--sub <queue> : read from <queue> and write the messages to stdout
--cat : when used with --pub, write all published messages to stdout
--clean : clean incoming and outgoing messages.
Verify the message is correct JSON and add
an embersId if needed.
--log_file : Path to write the log file to
--log_level : Logging level
Other standard EMBERS options (e.g. --verbose).
"""
import args
import message
global log
ap = args.get_parser()
ap.add_argument('--clean', action="store_true",
help='Verify message format and add standard fields such as embersId.')
ap.add_argument('--addfeed', action="store_true", help='Add feed and feedPath fields to published message.')
ap.add_argument('--cat', action="store_true", help='Write all published messages to stdout.')
ap.add_argument('--rm', nargs="+", help="delete queue")
arg = ap.parse_args()
log = logs.getLogger(log_name=arg.log_file)
logs.init(arg, l=arg.log_level, logfile=arg.log_file)
init(arg)
if arg.rm and not arg.sub:
for queue in arg.rm:
print "Deleting", queue,
queue = kqueue.Queue(queue)
queue.maybe_bind(connect())
queue.delete()
print "."
return
try:
# need to use the raw/utf handler unless we are doing clean
marshal = UnicodeMarshal()
if arg.clean or arg.addfeed:
marshal = JsonMarshal()
if arg.sub is None and os.environ.get('UPSTART_JOB') is None:
arg.sub = '-' # stdin
subq = open(arg.sub, 'r') #, marshal=marshal, ssh_key=arg.ssh_key, ssh_conn=arg.tunnel)
if arg.pub is None and os.environ.get('UPSTART_JOB') is None:
arg.pub = '-' # stdout
pubq = open(arg.pub, 'w', capture=arg.cat, marshal=marshal)
except Exception as e:
log.exception("Exception opening queues: %s" % e)
# "Human-readable" queue name can be retrieved as
#
# sname = subq.get_name()
# pname = pubq.get_name()
rc = 0
try:
it = subq.__iter__()
while True:
m = ''
try:
m = it.next()
if arg.clean:
m = message.clean(m)
if m:
if arg.addfeed:
m = message.add_embers_ids(m, feed=pubq.get_name(), feedPath=pubq.get_name())
pubq.write(m)
except StopIteration:
break
except KeyboardInterrupt:
break
except Exception as e:
rc += 1
if m:
log.exception('Could not process message %s: %s' % (m, e))
else:
log.exception('Unknown processing error %s' % e)
except KeyboardInterrupt:
pass
except Exception as e:
rc = 1
log.exception('Top level exception %s' % e)
return rc
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[
"UPSTART_JOB"
] |
[]
|
["UPSTART_JOB"]
|
python
| 1 | 0 | |
ServidorPython/python32_web/Lib/site-packages/prompt_toolkit/output/win32.py
|
from __future__ import unicode_literals
from ctypes import windll, byref, ArgumentError, c_char, c_long, c_ulong, c_uint, pointer
from ctypes.wintypes import DWORD
from prompt_toolkit.renderer import Output
from prompt_toolkit.styles import ANSI_COLOR_NAMES
from prompt_toolkit.utils import get_cwidth
from prompt_toolkit.win32_types import CONSOLE_SCREEN_BUFFER_INFO, STD_OUTPUT_HANDLE, STD_INPUT_HANDLE, COORD, SMALL_RECT
from .color_depth import ColorDepth
import os
import six
__all__ = [
'Win32Output',
]
def _coord_byval(coord):
"""
Turns a COORD object into a c_long.
This will cause it to be passed by value instead of by reference. (That is what I think at least.)
When ``ptipython`` is run (only with IPython), we often got the following error::
Error in 'SetConsoleCursorPosition'.
ArgumentError("argument 2: <class 'TypeError'>: wrong type",)
argument 2: <class 'TypeError'>: wrong type
It was solved by turning ``COORD`` parameters into a ``c_long`` like this.
More info: http://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
"""
return c_long(coord.Y * 0x10000 | coord.X & 0xFFFF)
#: If True: write the output of the renderer also to the following file. This
#: is very useful for debugging. (e.g.: to see that we don't write more bytes
#: than required.)
_DEBUG_RENDER_OUTPUT = False
_DEBUG_RENDER_OUTPUT_FILENAME = r'prompt-toolkit-windows-output.log'
class NoConsoleScreenBufferError(Exception):
"""
Raised when the application is not running inside a Windows Console, but
the user tries to instantiate Win32Output.
"""
def __init__(self):
# Are we running in 'xterm' on Windows, like git-bash for instance?
xterm = 'xterm' in os.environ.get('TERM', '')
if xterm:
message = ('Found %s, while expecting a Windows console. '
'Maybe try to run this program using "winpty" '
'or run it in cmd.exe instead. Or otherwise, '
'in case of Cygwin, use the Python executable '
'that is compiled for Cygwin.' % os.environ['TERM'])
else:
message = 'No Windows console found. Are you running cmd.exe?'
super(NoConsoleScreenBufferError, self).__init__(message)
class Win32Output(Output):
"""
I/O abstraction for rendering to Windows consoles.
(cmd.exe and similar.)
"""
def __init__(self, stdout, use_complete_width=False):
self.use_complete_width = use_complete_width
self._buffer = []
self.stdout = stdout
self.hconsole = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
self._in_alternate_screen = False
self._hidden = False
self.color_lookup_table = ColorLookupTable()
# Remember the default console colors.
info = self.get_win32_screen_buffer_info()
self.default_attrs = info.wAttributes if info else 15
if _DEBUG_RENDER_OUTPUT:
self.LOG = open(_DEBUG_RENDER_OUTPUT_FILENAME, 'ab')
def fileno(self):
" Return file descriptor. "
return self.stdout.fileno()
def encoding(self):
" Return encoding used for stdout. "
return self.stdout.encoding
def write(self, data):
if self._hidden:
data = ' ' * get_cwidth(data)
self._buffer.append(data)
def write_raw(self, data):
" For win32, there is no difference between write and write_raw. "
self.write(data)
def get_size(self):
from prompt_toolkit.layout.screen import Size
info = self.get_win32_screen_buffer_info()
# We take the width of the *visible* region as the size. Not the width
# of the complete screen buffer. (Unless use_complete_width has been
# set.)
if self.use_complete_width:
width = info.dwSize.X
else:
width = info.srWindow.Right - info.srWindow.Left
height = info.srWindow.Bottom - info.srWindow.Top + 1
# We avoid the right margin, windows will wrap otherwise.
maxwidth = info.dwSize.X - 1
width = min(maxwidth, width)
# Create `Size` object.
return Size(rows=height, columns=width)
def _winapi(self, func, *a, **kw):
"""
Flush and call win API function.
"""
self.flush()
if _DEBUG_RENDER_OUTPUT:
self.LOG.write(('%r' % func.__name__).encode('utf-8') + b'\n')
self.LOG.write(b' ' + ', '.join(['%r' % i for i in a]).encode('utf-8') + b'\n')
self.LOG.write(b' ' + ', '.join(['%r' % type(i) for i in a]).encode('utf-8') + b'\n')
self.LOG.flush()
try:
return func(*a, **kw)
except ArgumentError as e:
if _DEBUG_RENDER_OUTPUT:
self.LOG.write((' Error in %r %r %s\n' % (func.__name__, e, e)).encode('utf-8'))
def get_win32_screen_buffer_info(self):
"""
Return Screen buffer info.
"""
# NOTE: We don't call the `GetConsoleScreenBufferInfo` API through
# `self._winapi`. Doing so causes Python to crash on certain 64bit
# Python versions. (Reproduced with 64bit Python 2.7.6, on Windows
# 10). It is not clear why. Possibly, it has to do with passing
# these objects as an argument, or through *args.
# The Python documentation contains the following - possibly related - warning:
# ctypes does not support passing unions or structures with
# bit-fields to functions by value. While this may work on 32-bit
# x86, it's not guaranteed by the library to work in the general
# case. Unions and structures with bit-fields should always be
# passed to functions by pointer.
# Also see:
# - https://github.com/ipython/ipython/issues/10070
# - https://github.com/jonathanslenders/python-prompt-toolkit/issues/406
# - https://github.com/jonathanslenders/python-prompt-toolkit/issues/86
self.flush()
sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo))
# success = self._winapi(windll.kernel32.GetConsoleScreenBufferInfo,
# self.hconsole, byref(sbinfo))
if success:
return sbinfo
else:
raise NoConsoleScreenBufferError
def set_title(self, title):
"""
Set terminal title.
"""
assert isinstance(title, six.text_type)
self._winapi(windll.kernel32.SetConsoleTitleW, title)
def clear_title(self):
self._winapi(windll.kernel32.SetConsoleTitleW, '')
def erase_screen(self):
start = COORD(0, 0)
sbinfo = self.get_win32_screen_buffer_info()
length = sbinfo.dwSize.X * sbinfo.dwSize.Y
self.cursor_goto(row=0, column=0)
self._erase(start, length)
def erase_down(self):
sbinfo = self.get_win32_screen_buffer_info()
size = sbinfo.dwSize
start = sbinfo.dwCursorPosition
length = ((size.X - sbinfo.dwCursorPosition.X) + size.X * (size.Y - sbinfo.dwCursorPosition.Y))
self._erase(start, length)
def erase_end_of_line(self):
"""
Erase from the cursor position to the end of the current line.
"""
sbinfo = self.get_win32_screen_buffer_info()
start = sbinfo.dwCursorPosition
length = sbinfo.dwSize.X - sbinfo.dwCursorPosition.X
self._erase(start, length)
def _erase(self, start, length):
chars_written = c_ulong()
self._winapi(windll.kernel32.FillConsoleOutputCharacterA,
self.hconsole, c_char(b' '), DWORD(length), _coord_byval(start),
byref(chars_written))
# Reset attributes.
sbinfo = self.get_win32_screen_buffer_info()
self._winapi(windll.kernel32.FillConsoleOutputAttribute,
self.hconsole, sbinfo.wAttributes, length, _coord_byval(start),
byref(chars_written))
def reset_attributes(self):
" Reset the console foreground/background color. "
self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole,
self.default_attrs)
self._hidden = False
def set_attributes(self, attrs, color_depth):
fgcolor, bgcolor, bold, underline, italic, blink, reverse, self._hidden = attrs
# Start from the default attributes.
attrs = self.default_attrs
if color_depth != ColorDepth.DEPTH_1_BIT:
# Override the last four bits: foreground color.
if fgcolor:
attrs = attrs & ~0xf
attrs |= self.color_lookup_table.lookup_fg_color(fgcolor)
# Override the next four bits: background color.
if bgcolor:
attrs = attrs & ~0xf0
attrs |= self.color_lookup_table.lookup_bg_color(bgcolor)
# Reverse: swap these four bits groups.
if reverse:
attrs = (attrs & ~0xff) | ((attrs & 0xf) << 4) | ((attrs & 0xf0) >> 4)
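# Worked example (values assumed for illustration): with attrs == 0x47
# (gray text on a red background), the expression gives
# (0x47 & ~0xff) | (0x7 << 4) | (0x40 >> 4) == 0x74, i.e. red text on a gray
# background -- the foreground and background nibbles simply swap places.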
self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole, attrs)
def disable_autowrap(self):
# Not supported by Windows.
pass
def enable_autowrap(self):
# Not supported by Windows.
pass
def cursor_goto(self, row=0, column=0):
pos = COORD(x=column, y=row)
self._winapi(windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos))
def cursor_up(self, amount):
sr = self.get_win32_screen_buffer_info().dwCursorPosition
pos = COORD(sr.X, sr.Y - amount)
self._winapi(windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos))
def cursor_down(self, amount):
self.cursor_up(-amount)
def cursor_forward(self, amount):
sr = self.get_win32_screen_buffer_info().dwCursorPosition
# assert sr.X + amount >= 0, 'Negative cursor position: x=%r amount=%r' % (sr.X, amount)
pos = COORD(max(0, sr.X + amount), sr.Y)
self._winapi(windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos))
def cursor_backward(self, amount):
self.cursor_forward(-amount)
def flush(self):
"""
Write to output stream and flush.
"""
if not self._buffer:
# Only flush stdout buffer. (It could be that Python still has
# something in its buffer. -- We want to be sure to print that in
# the correct color.)
self.stdout.flush()
return
data = ''.join(self._buffer)
if _DEBUG_RENDER_OUTPUT:
self.LOG.write(('%r' % data).encode('utf-8') + b'\n')
self.LOG.flush()
# Print characters one by one. This appears to be the best solution
# in order to avoid traces of vertical lines when the completion
# menu disappears.
for b in data:
written = DWORD()
retval = windll.kernel32.WriteConsoleW(self.hconsole, b, 1, byref(written), None)
assert retval != 0
self._buffer = []
def get_rows_below_cursor_position(self):
info = self.get_win32_screen_buffer_info()
return info.srWindow.Bottom - info.dwCursorPosition.Y + 1
def scroll_buffer_to_prompt(self):
"""
To be called before drawing the prompt. This should scroll the console
to the left, with the cursor at the bottom (if possible).
"""
# Get current window size
info = self.get_win32_screen_buffer_info()
sr = info.srWindow
cursor_pos = info.dwCursorPosition
result = SMALL_RECT()
# Scroll to the left.
result.Left = 0
result.Right = sr.Right - sr.Left
# Scroll vertical
win_height = sr.Bottom - sr.Top
if 0 < sr.Bottom - cursor_pos.Y < win_height - 1:
# no vertical scroll if cursor already on the screen
result.Bottom = sr.Bottom
else:
result.Bottom = max(win_height, cursor_pos.Y)
result.Top = result.Bottom - win_height
# Scroll API
self._winapi(windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, byref(result))
def enter_alternate_screen(self):
"""
Go to alternate screen buffer.
"""
if not self._in_alternate_screen:
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
# Create a new console buffer and activate that one.
handle = self._winapi(windll.kernel32.CreateConsoleScreenBuffer, GENERIC_READ|GENERIC_WRITE,
DWORD(0), None, DWORD(1), None)
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle)
self.hconsole = handle
self._in_alternate_screen = True
def quit_alternate_screen(self):
"""
Make stdout again the active buffer.
"""
if self._in_alternate_screen:
stdout = self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE)
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout)
self._winapi(windll.kernel32.CloseHandle, self.hconsole)
self.hconsole = stdout
self._in_alternate_screen = False
def enable_mouse_support(self):
ENABLE_MOUSE_INPUT = 0x10
handle = windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)
original_mode = DWORD()
self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode))
self._winapi(windll.kernel32.SetConsoleMode, handle, original_mode.value | ENABLE_MOUSE_INPUT)
def disable_mouse_support(self):
ENABLE_MOUSE_INPUT = 0x10
handle = windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)
original_mode = DWORD()
self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode))
self._winapi(windll.kernel32.SetConsoleMode, handle, original_mode.value & ~ ENABLE_MOUSE_INPUT)
def hide_cursor(self):
pass
def show_cursor(self):
pass
@classmethod
def win32_refresh_window(cls):
"""
Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it.
"""
# Get console handle
handle = windll.kernel32.GetConsoleWindow()
RDW_INVALIDATE = 0x0001
windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE))
class FOREGROUND_COLOR:
BLACK = 0x0000
BLUE = 0x0001
GREEN = 0x0002
CYAN = 0x0003
RED = 0x0004
MAGENTA = 0x0005
YELLOW = 0x0006
GRAY = 0x0007
INTENSITY = 0x0008 # Foreground color is intensified.
class BACKGROUND_COLOR:
BLACK = 0x0000
BLUE = 0x0010
GREEN = 0x0020
CYAN = 0x0030
RED = 0x0040
MAGENTA = 0x0050
YELLOW = 0x0060
GRAY = 0x0070
INTENSITY = 0x0080 # Background color is intensified.
def _create_ansi_color_dict(color_cls):
" Create a table that maps the 16 named ansi colors to their Windows code. "
return {
'ansidefault': color_cls.BLACK,
'ansiblack': color_cls.BLACK,
'ansigray': color_cls.GRAY,
'ansibrightblack': color_cls.BLACK | color_cls.INTENSITY,
'ansiwhite': color_cls.GRAY | color_cls.INTENSITY,
# Low intensity.
'ansired': color_cls.RED,
'ansigreen': color_cls.GREEN,
'ansiyellow': color_cls.YELLOW,
'ansiblue': color_cls.BLUE,
'ansimagenta': color_cls.MAGENTA,
'ansicyan': color_cls.CYAN,
# High intensity.
'ansibrightred': color_cls.RED | color_cls.INTENSITY,
'ansibrightgreen': color_cls.GREEN | color_cls.INTENSITY,
'ansibrightyellow': color_cls.YELLOW | color_cls.INTENSITY,
'ansibrightblue': color_cls.BLUE | color_cls.INTENSITY,
'ansibrightmagenta': color_cls.MAGENTA | color_cls.INTENSITY,
'ansibrightcyan': color_cls.CYAN | color_cls.INTENSITY,
}
FG_ANSI_COLORS = _create_ansi_color_dict(FOREGROUND_COLOR)
BG_ANSI_COLORS = _create_ansi_color_dict(BACKGROUND_COLOR)
assert set(FG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
assert set(BG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
class ColorLookupTable(object):
"""
Inspired by pygments/formatters/terminal256.py
"""
def __init__(self):
self._win32_colors = self._build_color_table()
self.best_match = {} # Cache
@staticmethod
def _build_color_table():
"""
Build a table that maps RGB values to the 16 Windows console colors.
"""
FG = FOREGROUND_COLOR
BG = BACKGROUND_COLOR
return [
(0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),
(0x00, 0x00, 0xaa, FG.BLUE, BG.BLUE),
(0x00, 0xaa, 0x00, FG.GREEN, BG.GREEN),
(0x00, 0xaa, 0xaa, FG.CYAN, BG.CYAN),
(0xaa, 0x00, 0x00, FG.RED, BG.RED),
(0xaa, 0x00, 0xaa, FG.MAGENTA, BG.MAGENTA),
(0xaa, 0xaa, 0x00, FG.YELLOW, BG.YELLOW),
(0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),
(0x44, 0x44, 0xff, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),
(0x44, 0xff, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),
(0x44, 0xff, 0xff, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),
(0xff, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),
(0xff, 0x44, 0xff, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),
(0xff, 0xff, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),
(0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),
(0xff, 0xff, 0xff, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),
]
def _closest_color(self, r, g, b):
distance = 257 * 257 * 3 # "infinity" (>distance from #000000 to #ffffff)
fg_match = 0
bg_match = 0
for r_, g_, b_, fg_, bg_ in self._win32_colors:
rd = r - r_
gd = g - g_
bd = b - b_
d = rd * rd + gd * gd + bd * bd
if d < distance:
fg_match = fg_
bg_match = bg_
distance = d
return fg_match, bg_match
def _color_indexes(self, color):
indexes = self.best_match.get(color, None)
if indexes is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
indexes = self._closest_color(r, g, b)
self.best_match[color] = indexes
return indexes
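# Illustrative sketch (assumed input, not part of the original module): for the
# color string 'ff0000', int('ff0000', 16) == 0xff0000, so r=0xff, g=0x00, b=0x00.
# _closest_color then picks the table entry with the smallest squared RGB
# distance; here that is (0xaa, 0x00, 0x00), so the cached indexes become
# (FOREGROUND_COLOR.RED, BACKGROUND_COLOR.RED).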
def lookup_fg_color(self, fg_color):
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
"""
# Foreground.
if fg_color in FG_ANSI_COLORS:
return FG_ANSI_COLORS[fg_color]
else:
return self._color_indexes(fg_color)[0]
def lookup_bg_color(self, bg_color):
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param bg_color: Background as text. E.g. 'ffffff' or 'red'
"""
# Background.
if bg_color in BG_ANSI_COLORS:
return BG_ANSI_COLORS[bg_color]
else:
return self._color_indexes(bg_color)[1]
|
[] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
python
| 1 | 0 | |
cmd/deployment-check/main.go
|
package main
import (
"context"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
kh "github.com/kuberhealthy/kuberhealthy/v2/pkg/checks/external/checkclient"
"github.com/kuberhealthy/kuberhealthy/v2/pkg/kubeClient"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
var (
// K8s config file for the client.
kubeConfigFile = filepath.Join(os.Getenv("HOME"), ".kube", "config")
// Image the check deploys. If configured to do a rolling update, this is the first
// image used in the deployment.
checkImageURLEnv = os.Getenv("CHECK_IMAGE")
checkImageURL string
// Image the check will rolling-update to if configured to do a rolling update.
checkImageURLBEnv = os.Getenv("CHECK_IMAGE_ROLL_TO")
checkImageURLB string
// Deployment name that will be used for the check (in case an existing deployment uses a similar name).
checkDeploymentNameEnv = os.Getenv("CHECK_DEPLOYMENT_NAME")
checkDeploymentName string
// Service name that will be used for the check (in case an existing service uses a similar name).
checkServiceNameEnv = os.Getenv("CHECK_SERVICE_NAME")
checkServiceName string
// Container port that will be exposed for the deployment [default = 80] (for HTTP).
checkContainerPortEnv = os.Getenv("CHECK_CONTAINER_PORT")
checkContainerPort int32
// Load balancer port that will be exposed for the deployment [default = 80] (for HTTP).
checkLoadBalancerPortEnv = os.Getenv("CHECK_LOAD_BALANCER_PORT")
checkLoadBalancerPort int32
// Namespace the check deployment will be created in [default = kuberhealthy].
checkNamespaceEnv = os.Getenv("CHECK_NAMESPACE")
checkNamespace string
// Number of replicas the deployment will bring up [default = 2].
checkDeploymentReplicasEnv = os.Getenv("CHECK_DEPLOYMENT_REPLICAS")
checkDeploymentReplicas int
// Toleration values for the deployment check
checkDeploymentTolerationsEnv = os.Getenv("TOLERATIONS")
checkDeploymentTolerations []apiv1.Toleration
// Node selectors for the deployment check
checkDeploymentNodeSelectorsEnv = os.Getenv("NODE_SELECTOR")
checkDeploymentNodeSelectors = make(map[string]string)
// ServiceAccount that will deploy the test deployment [default = default]
checkServiceAccountEnv = os.Getenv("CHECK_SERVICE_ACCOUNT")
checkServiceAccount string
// Deployment pod resource requests and limits.
millicoreRequestEnv = os.Getenv("CHECK_POD_CPU_REQUEST")
millicoreRequest int
millicoreLimitEnv = os.Getenv("CHECK_POD_CPU_LIMIT")
millicoreLimit int
memoryRequestEnv = os.Getenv("CHECK_POD_MEM_REQUEST")
memoryRequest int
memoryLimitEnv = os.Getenv("CHECK_POD_MEM_LIMIT")
memoryLimit int
// Check time limit.
checkTimeLimit time.Duration
// Boolean value if a rolling-update is requested.
rollingUpdateEnv = os.Getenv("CHECK_DEPLOYMENT_ROLLING_UPDATE")
rollingUpdate bool
// Additional container environment variables if a custom image is used for the deployment.
additionalEnvVarsEnv = os.Getenv("ADDITIONAL_ENV_VARS")
additionalEnvVars = make(map[string]string)
// Seconds allowed for the shutdown process to complete.
shutdownGracePeriodEnv = os.Getenv("SHUTDOWN_GRACE_PERIOD")
shutdownGracePeriod time.Duration
// Time object used for the check.
now time.Time
ctx context.Context
ctxCancel context.CancelFunc
// Interrupt signal channels.
signalChan chan os.Signal
doneChan chan bool
debugEnv = os.Getenv("DEBUG")
debug bool
// K8s client used for the check.
client *kubernetes.Clientset
)
const (
// Default container name.
defaultCheckContainerName = "deployment-container"
// Default images used for check.
defaultCheckImageURL = "nginxinc/nginx-unprivileged:1.17.8"
defaultCheckImageURLB = "nginxinc/nginx-unprivileged:1.17.9"
// Default container port used for check.
defaultCheckContainerPort = int32(8080)
// Default load balancer port used for check.
defaultCheckLoadBalancerPort = int32(80)
// Default k8s manifest resource names.
defaultCheckDeploymentName = "deployment-deployment"
defaultCheckServiceName = "deployment-svc"
// Default k8s service account name.
defaultCheckServieAccount = "default"
// Default namespace for the check to run in.
defaultCheckNamespace = "kuberhealthy"
// Default number of replicas the deployment should bring up.
defaultCheckDeploymentReplicas = 2
defaultCheckTimeLimit = time.Duration(time.Minute * 15)
defaultShutdownGracePeriod = time.Duration(time.Second * 30) // grace period for the check to shutdown after receiving a shutdown signal
)
func init() {
// Parse incoming debug settings.
parseDebugSettings()
// Parse all incoming input environment variables and crash if an error occurs
// during parsing process.
parseInputValues()
// Allocate channels.
signalChan = make(chan os.Signal, 3)
doneChan = make(chan bool)
}
func main() {
// Create a timestamp reference for the deployment;
// also to reference against deployments that should be cleaned up.
now = time.Now()
log.Debugln("Allowing this check", checkTimeLimit, "to finish.")
ctx, ctxCancel = context.WithTimeout(context.Background(), checkTimeLimit)
// Create a kubernetes client.
var err error
client, err = kubeClient.Create(kubeConfigFile)
if err != nil {
errorMessage := "failed to create a kubernetes client with error: " + err.Error()
reportErrorsToKuberhealthy([]string{errorMessage})
return
}
log.Infoln("Kubernetes client created.")
// Start listening to interrupts.
go listenForInterrupts(ctx, ctxCancel)
// Catch panics.
var r interface{}
defer func() {
r = recover()
if r != nil {
log.Infoln("Recovered panic:", r)
reportToKuberhealthy(false, []string{r.(string)})
}
}()
// Start deployment check.
runDeploymentCheck(ctx)
}
// listenForInterrupts watches the signal and done channels for termination.
func listenForInterrupts(ctx context.Context, cancel context.CancelFunc) {
// Relay incoming OS interrupt signals to the signalChan.
signal.Notify(signalChan, os.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGINT)
sig := <-signalChan // This is a blocking operation -- the routine will stop here until there is something sent down the channel.
log.Infoln("Received an interrupt signal from the signal channel.")
log.Debugln("Signal received was:", sig.String())
log.Debugln("Cancelling context.")
cancel() // Causes all functions within the check to return without error and abort. NOT an error
// condition; this is a response to an external shutdown signal.
// Clean up pods here.
log.Infoln("Shutting down.")
select {
case sig = <-signalChan:
// If there is an interrupt signal, interrupt the run.
log.Warnln("Received a second interrupt signal from the signal channel.")
log.Debugln("Signal received was:", sig.String())
case err := <-cleanUpAndWait(ctx):
// If the clean up is complete, exit.
log.Infoln("Received a complete signal, clean up completed.")
if err != nil {
log.Errorln("failed to clean up check resources properly:", err.Error())
}
case <-time.After(time.Duration(shutdownGracePeriod)):
// Exit if the clean up took too long to provide a response.
log.Infoln("Clean up took too long to complete and timed out.")
}
os.Exit(0)
}
// cleanUpAndWait cleans up things and returns a signal down the returned channel when completed.
func cleanUpAndWait(ctx context.Context) chan error {
// Watch for the clean up process to complete.
doneChan := make(chan error)
go func() {
doneChan <- cleanUp(ctx)
}()
return doneChan
}
// reportErrorsToKuberhealthy reports the specified errors for this check run.
func reportErrorsToKuberhealthy(errs []string) {
log.Errorln("Reporting errors to Kuberhealthy:", errs)
reportToKuberhealthy(false, errs)
}
// reportOKToKuberhealthy reports that there were no errors on this check run to Kuberhealthy.
func reportOKToKuberhealthy() {
log.Infoln("Reporting success to Kuberhealthy.")
reportToKuberhealthy(true, []string{})
}
// reportToKuberhealthy reports the check status to Kuberhealthy.
func reportToKuberhealthy(ok bool, errs []string) {
var err error
if ok {
err = kh.ReportSuccess()
if err != nil {
log.Fatalln("error reporting to kuberhealthy:", err.Error())
}
return
}
err = kh.ReportFailure(errs)
if err != nil {
log.Fatalln("error reporting to kuberhealthy:", err.Error())
}
return
}
|
[
"\"HOME\"",
"\"CHECK_IMAGE\"",
"\"CHECK_IMAGE_ROLL_TO\"",
"\"CHECK_DEPLOYMENT_NAME\"",
"\"CHECK_SERVICE_NAME\"",
"\"CHECK_CONTAINER_PORT\"",
"\"CHECK_LOAD_BALANCER_PORT\"",
"\"CHECK_NAMESPACE\"",
"\"CHECK_DEPLOYMENT_REPLICAS\"",
"\"TOLERATIONS\"",
"\"NODE_SELECTOR\"",
"\"CHECK_SERVICE_ACCOUNT\"",
"\"CHECK_POD_CPU_REQUEST\"",
"\"CHECK_POD_CPU_LIMIT\"",
"\"CHECK_POD_MEM_REQUEST\"",
"\"CHECK_POD_MEM_LIMIT\"",
"\"CHECK_DEPLOYMENT_ROLLING_UPDATE\"",
"\"ADDITIONAL_ENV_VARS\"",
"\"SHUTDOWN_GRACE_PERIOD\"",
"\"DEBUG\""
] |
[] |
[
"SHUTDOWN_GRACE_PERIOD",
"CHECK_DEPLOYMENT_NAME",
"CHECK_POD_CPU_LIMIT",
"CHECK_NAMESPACE",
"CHECK_POD_MEM_REQUEST",
"CHECK_POD_MEM_LIMIT",
"CHECK_DEPLOYMENT_ROLLING_UPDATE",
"CHECK_SERVICE_NAME",
"CHECK_LOAD_BALANCER_PORT",
"CHECK_POD_CPU_REQUEST",
"CHECK_IMAGE",
"TOLERATIONS",
"ADDITIONAL_ENV_VARS",
"CHECK_DEPLOYMENT_REPLICAS",
"CHECK_CONTAINER_PORT",
"NODE_SELECTOR",
"CHECK_SERVICE_ACCOUNT",
"DEBUG",
"CHECK_IMAGE_ROLL_TO",
"HOME"
] |
[]
|
["SHUTDOWN_GRACE_PERIOD", "CHECK_DEPLOYMENT_NAME", "CHECK_POD_CPU_LIMIT", "CHECK_NAMESPACE", "CHECK_POD_MEM_REQUEST", "CHECK_POD_MEM_LIMIT", "CHECK_DEPLOYMENT_ROLLING_UPDATE", "CHECK_SERVICE_NAME", "CHECK_LOAD_BALANCER_PORT", "CHECK_POD_CPU_REQUEST", "CHECK_IMAGE", "TOLERATIONS", "ADDITIONAL_ENV_VARS", "CHECK_DEPLOYMENT_REPLICAS", "CHECK_CONTAINER_PORT", "NODE_SELECTOR", "CHECK_SERVICE_ACCOUNT", "DEBUG", "CHECK_IMAGE_ROLL_TO", "HOME"]
|
go
| 20 | 0 | |
bq-workers/gitlab-parser/main.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import json
import shared
from flask import Flask, request
app = Flask(__name__)
@app.route("/", methods=["POST"])
def index():
"""
Receives messages from a push subscription from Pub/Sub.
Parses the message, and inserts it into BigQuery.
"""
event = None
envelope = request.get_json()
# Check that data has been posted
if not envelope:
raise Exception("Expecting JSON payload")
# Check that message is a valid pub/sub message
if "message" not in envelope:
raise Exception("Not a valid Pub/Sub Message")
msg = envelope["message"]
if "attributes" not in msg:
raise Exception("Missing pubsub attributes")
try:
attr = msg["attributes"]
# Header Event info
if "headers" in attr:
headers = json.loads(attr["headers"])
# Process Gitlab Events
if "X-Gitlab-Event" in headers:
event = process_gitlab_event(headers, msg)
shared.insert_row_into_bigquery(event)
except Exception as e:
entry = {
"severity": "WARNING",
"msg": "Data not saved to BigQuery",
"errors": str(e),
"json_payload": envelope
}
print(json.dumps(entry))
return "", 204
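# For reference, a minimal envelope this handler accepts looks roughly like the
# sketch below (all field values are made up for illustration; only the keys the
# code reads are shown):
# {
#   "message": {
#     "attributes": {"headers": "{\"X-Gitlab-Event\": \"Push Hook\"}"},
#     "data": "<base64-encoded GitLab webhook payload>",
#     "message_id": "1234",
#     "publishTime": "2021-04-28T21:50:00Z"
#   }
# }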
def process_gitlab_event(headers, msg):
# Unique hash for the event
signature = shared.create_unique_id(msg)
# The "Mock" header marks generated test events; remember it so the source can
# be suffixed with "mock" once the payload has been decoded below.
is_mock = "Mock" in headers
types = {"push", "merge_request",
"note", "tag_push", "issue",
"pipeline", "job", "deployment",
"build"}
metadata = json.loads(base64.b64decode(msg["data"]).decode("utf-8").strip())
source = metadata["repository"]["name"]
if is_mock:
source += "mock"
event_type = metadata["object_kind"]
if event_type not in types:
raise Exception("Unsupported Gitlab event: '%s'" % event_type)
if event_type in ("push", "tag_push"):
e_id = metadata["checkout_sha"]
for commit in metadata["commits"]:
if commit["id"] == e_id:
time_created = commit["timestamp"]
if event_type in ("merge_request", "note", "issue", "pipeline"):
event_object = metadata["object_attributes"]
e_id = event_object["id"]
time_created = (
event_object.get("updated_at") or
event_object.get("finished_at") or
event_object.get("created_at"))
if event_type == "job":
e_id = metadata["build_id"]
time_created = (
metadata.get("build_finished_at") or
metadata.get("build_started_at"))
if event_type == "deployment":
e_id = metadata["deployment_id"]
# Deployment timestamps come in a format like "2021-04-28 21:50:00 +0200"
# BigQuery does not accept this as a valid format
# Removing the extra timezone information below
time_created = metadata["status_changed_at"][:-6]
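# E.g. (illustrative): "2021-04-28 21:50:00 +0200"[:-6] -> "2021-04-28 21:50:00",
# which drops the trailing " +0200" offset so BigQuery accepts the value.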
if event_type == "build":
e_id = metadata["build_id"]
time_created = (
metadata.get("build_finished_at") or
metadata.get("build_started_at") or
metadata.get("build_created_at"))
gitlab_event = {
"event_type": event_type,
"id": e_id,
"metadata": json.dumps(metadata),
# If time_created not supplied by event, default to pub/sub publishTime
"time_created": time_created or msg["publishTime"],
"signature": signature,
"msg_id": msg["message_id"],
"source": source,
}
return gitlab_event
if __name__ == "__main__":
PORT = int(os.getenv("PORT")) if os.getenv("PORT") else 8080
# This is used when running locally. Gunicorn is used to run the
# application on Cloud Run. See entrypoint in Dockerfile.
app.run(host="127.0.0.1", port=PORT, debug=True)
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
setup.py
|
# Copyright 2020 Google LLC
# Copyright 2021 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setuptools for Atheris."""
import os
import shutil
import subprocess
import sys
import tempfile
import setuptools
from setuptools import Extension
from setuptools import setup
from setuptools.command.build_ext import build_ext
__version__ = os.getenv("ATHERIS_VERSION", "1.0.11")
if len(sys.argv) > 1 and sys.argv[1] == "print_version":
print(__version__)
quit()
clang_install_instructions = """download and build the latest version of Clang:
git clone https://github.com/llvm/llvm-project.git
cd llvm-project
mkdir build
cd build
cmake -DLLVM_ENABLE_PROJECTS='clang;compiler-rt' -G "Unix Makefiles" ../llvm
make -j 100 # This step is very slow
Then, set $CLANG_BIN="$(pwd)/bin/clang" and run pip again.
You should use this same Clang for building any Python extensions you plan to fuzz.
"""
too_old_error = """Your libFuzzer version is too old; set either $CLANG_BIN to point to a more recent Clang, or $LIBFUZZER_VERSION to point directly to a more recent libFuzzer .a file. If needed, """ + clang_install_instructions
no_libfuzzer_error = """Failed to find libFuzzer; set either $CLANG_BIN to point to your Clang binary, or $LIBFUZZER_LIB to point directly to your libFuzzer .a file. If needed, """ + clang_install_instructions
if sys.platform == "darwin":
too_old_error = ("Your libFuzzer version is too old.\nPlease " +
clang_install_instructions + "Do not use Apple "
"Clang; Apple Clang does not come with libFuzzer.")
no_libfuzzer_error = ("Failed to find libFuzzer; you may be building using "
"Apple Clang. Apple Clang does not come with "
"libFuzzer.\nPlease " + clang_install_instructions)
class PybindIncludeGetter(object):
"""Helper class to determine the pybind11 include path.
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked.
"""
def __str__(self):
import pybind11 # pylint: disable=g-import-not-at-top
return pybind11.get_include()
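# Rough usage note (based on how distutils builds -I options, not on anything in
# this file): each include_dirs entry is formatted with "%s", so
# str(PybindIncludeGetter()) -- and therefore the pybind11 import -- only runs at
# build time, after setup_requires has made pybind11 available.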
def check_libfuzzer_version(libfuzzer):
"""Verifies that the specified libFuzzer is of a sufficiently high version."""
current_path = os.path.dirname(os.path.realpath(__file__))
try:
version = subprocess.check_output(
[current_path + "/setup_utils/check_libfuzzer_version.sh", libfuzzer])
except subprocess.CalledProcessError as e:
sys.stderr.write("Failed to check libFuzzer version: %s\n" % e.stderr)
sys.stderr.write("Assuming libFuzzer is up-to-date.\n")
return "up-to-date"
version = version.strip().decode("utf-8")
return version
def upgrade_libfuzzer(libfuzzer):
"""Hacky code for upgrading libFuzzer to be compatible with Atheris."""
current_path = os.path.dirname(os.path.realpath(__file__))
try:
new_libfuzzer = subprocess.check_output(
[current_path + "/setup_utils/upgrade_libfuzzer.sh", libfuzzer])
except subprocess.CalledProcessError as e:
sys.stderr.write("libFuzzer upgrade failed: %s" % e.stderr)
return libfuzzer
new_libfuzzer = new_libfuzzer.strip().decode("utf-8")
return new_libfuzzer
def get_libfuzzer_lib():
"""Returns path to the libFuzzer .a library."""
libfuzzer_lib = os.getenv("LIBFUZZER_LIB", "")
if libfuzzer_lib:
return libfuzzer_lib
current_path = os.path.dirname(os.path.realpath(__file__))
try:
libfuzzer = subprocess.check_output(
[current_path + "/setup_utils/find_libfuzzer.sh"])
except subprocess.CalledProcessError as e:
sys.stderr.write(no_libfuzzer_error + "\n")
raise RuntimeError(no_libfuzzer_error)
libfuzzer = libfuzzer.strip().decode("utf-8")
return libfuzzer
ext_modules = [
Extension(
"atheris.atheris",
sorted([
"atheris.cc",
"util.cc",
"fuzzed_data_provider.cc",
]),
include_dirs=[
# Path to pybind11 headers
PybindIncludeGetter(),
],
language="c++"),
Extension(
"atheris.core_with_libfuzzer",
sorted([
"core.cc",
"tracer.cc",
"util.cc",
]),
include_dirs=[
# Path to pybind11 headers
PybindIncludeGetter(),
],
language="c++"),
Extension(
"atheris.core_without_libfuzzer",
sorted([
"core.cc",
"tracer.cc",
"util.cc",
]),
include_dirs=[
# Path to pybind11 headers
PybindIncludeGetter(),
],
language="c++"),
]
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether the compiler supports the given flag."""
with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as f:
f.write("int main (int argc, char **argv) { return 0; }")
fname = f.name
try:
compiler.compile([fname], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
finally:
try:
os.remove(fname)
except OSError:
pass
return True
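# Rough usage sketch (the compiler object comes from build_ext, as in cpp_flag
# below): has_flag(self.compiler, "-std=c++17") compiles a trivial translation
# unit with that single extra flag and returns True only if the compile succeeds.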
def cpp_flag(compiler):
"""Return the highest-supported -std=c++[11/14/17] compiler flag."""
if os.getenv("FORCE_MIN_VERSION"):
# Use for testing, to make sure Atheris supports C++11
flags = ["-std=c++11"]
elif os.getenv("FORCE_VERSION"):
flags = ["-std=c++" + os.getenv("FORCE_VERSION")]
else:
flags = [
#"-std=c++17", C++17 disabled unless explicitly requested, to work
# around https://github.com/pybind/pybind11/issues/1818
"-std=c++14",
"-std=c++11"]
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError("Unsupported compiler -- at least C++11 support "
"is needed!")
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
def build_extensions(self):
libfuzzer = get_libfuzzer_lib()
orig_libfuzzer = libfuzzer
orig_libfuzzer_name = os.path.basename(libfuzzer)
version = check_libfuzzer_version(libfuzzer)
if sys.platform == "darwin" and version != "up-to-date":
raise RuntimeError(too_old_error)
if version == "outdated-unrecoverable":
raise RuntimeError(too_old_error)
elif version == "outdated-recoverable":
sys.stderr.write("Your libFuzzer version is too old, but it's possible "
"to attempt an in-place upgrade. Trying that now.\n")
libfuzzer = upgrade_libfuzzer(libfuzzer)
if check_libfuzzer_version(libfuzzer) != "up-to-date":
sys.stderr.write("Upgrade failed.")
raise RuntimeError(too_old_error)
elif version != "up-to-date":
raise RuntimeError("Unexpected libFuzzer version status: " + version)
sys.stderr.write("Your libFuzzer is up-to-date.\n")
c_opts = []
l_opts = []
if sys.platform == "darwin":
darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
c_opts += darwin_opts
l_opts += darwin_opts
ct = self.compiler.compiler_type
if ct == "unix":
c_opts.append(cpp_flag(self.compiler))
for ext in self.extensions:
ext.define_macros = [("VERSION_INFO",
"'{}'".format(self.distribution.get_version())),
("ATHERIS_MODULE_NAME", ext.name.split(".")[1])]
ext.extra_compile_args = c_opts
if ext.name == "atheris.core_with_libfuzzer":
ext.extra_link_args = l_opts + [libfuzzer]
else:
ext.extra_link_args = l_opts
build_ext.build_extensions(self)
try:
self.deploy_file(libfuzzer, orig_libfuzzer_name)
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
# Deploy versions of ASan and UBSan that have been merged with libFuzzer
asan_name = orig_libfuzzer.replace(".fuzzer_no_main-", ".asan-")
merged_asan_name = "asan_with_fuzzer.so"
self.merge_deploy_libfuzzer_sanitizer(
libfuzzer, asan_name, merged_asan_name,
"asan_preinit.cc.o asan_preinit.cpp.o")
ubsan_name = orig_libfuzzer.replace(".fuzzer_no_main-",
".ubsan_standalone-")
merged_ubsan_name = "ubsan_with_fuzzer.so"
self.merge_deploy_libfuzzer_sanitizer(
libfuzzer, ubsan_name, merged_ubsan_name,
"ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
)
ubsanxx_name = orig_libfuzzer.replace(".fuzzer_no_main-",
".ubsan_standalone_cxx-")
merged_ubsanxx_name = "ubsan_cxx_with_fuzzer.so"
self.merge_deploy_libfuzzer_sanitizer(
libfuzzer, ubsanxx_name, merged_ubsanxx_name,
"ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
)
def deploy_file(self, name, target_filename):
atheris = self.get_ext_fullpath("atheris")
dest_file = os.path.join(os.path.dirname(atheris), target_filename)
shutil.copy(name, dest_file)
def merge_libfuzzer_sanitizer(self, libfuzzer, sanitizer, strip_preinit):
"""Generate a .so that contains both libFuzzer and a sanitizer."""
current_path = os.path.dirname(os.path.realpath(__file__))
new_sanitizer = subprocess.check_output([
os.path.join(current_path, "setup_utils/merge_libfuzzer_sanitizer.sh"),
libfuzzer, sanitizer, strip_preinit
])
return new_sanitizer.strip().decode("utf-8")
def merge_deploy_libfuzzer_sanitizer(self, libfuzzer, lib_name,
merged_lib_name, preinit):
try:
merged_lib = self.merge_libfuzzer_sanitizer(libfuzzer, lib_name, preinit)
self.deploy_file(merged_lib, merged_lib_name)
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
setup(
name="atheris",
version=__version__,
author="Ian Eldred Pudney",
author_email="[email protected]",
url="https://pypi.org/project/atheris/",
description="A coverage-guided fuzzer for Python and Python extensions.",
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
packages=["atheris"],
ext_modules=ext_modules,
setup_requires=["pybind11>=2.5.0"],
cmdclass={"build_ext": BuildExt},
zip_safe=False,
)
|
[] |
[] |
[
"LIBFUZZER_LIB",
"ATHERIS_VERSION",
"FORCE_VERSION",
"FORCE_MIN_VERSION"
] |
[]
|
["LIBFUZZER_LIB", "ATHERIS_VERSION", "FORCE_VERSION", "FORCE_MIN_VERSION"]
|
python
| 4 | 0 | |
questions/import/Import.go
|
package main
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"github.com/360EntSecGroup-Skylar/excelize"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/google/uuid"
"github.com/thoas/go-funk"
)
//GetQuestions get all questions for a survey in dynamodb
func GetQuestions(app App, surveyID string) ([]Question, error) {
params := &dynamodb.QueryInput{
KeyConditionExpression: aws.String("#survey_id = :survey_id AND #question_id >= :question_id"),
ExpressionAttributeNames: map[string]*string{
"#survey_id": aws.String("survey_id"),
"#question_id": aws.String("question_id"),
},
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":survey_id": &dynamodb.AttributeValue{
S: aws.String(surveyID),
},
":question_id": &dynamodb.AttributeValue{
S: aws.String("!"), //`!` used because it's a smaller character based on ASCII table
},
},
TableName: aws.String(os.Getenv("QUESTION_TABLE")),
}
questions := make([]Question, 0)
//query all questions for a given surveyID
if err := app.DynamoService.QueryPages(params, queryHandler(&questions)); err != nil {
return nil, err
}
return questions, nil
}
//queryHandler returns a handle for dynamodb.QueryPages in `GetQuestions`.
//it also populates the *[]Question argument based on the results.
func queryHandler(questions *[]Question) func(page *dynamodb.QueryOutput, lastPage bool) bool {
// Unmarshal the slice of dynamodb attribute values
// into a slice of Question structs
return func(page *dynamodb.QueryOutput, lastPage bool) bool {
var questionsInAPage []Question
err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &questionsInAPage)
if err != nil {
fmt.Printf("\nCould not unmarshal AWS data: err = %v\n", err)
return true
}
//append the questions in this page to master array from arguments
*questions = append(*questions, questionsInAPage...)
//if lastPage is true, return false. Otherwise, return true.
//false => end. true => continue
return !lastPage
}
}
//Import questions to dynamodb table
func Import(request Request) error {
file, err := excelize.OpenReader(bytes.NewReader(request.Content))
if err != nil {
return err
}
for _, sheet := range file.GetSheetMap() {
if err := importSingleSheet(request, file, sheet); err != nil {
return err
}
}
//save updated excel to s3.
return saveUpdatedExcel(request, file)
}
func saveUpdatedExcel(request Request, file *excelize.File) error {
var buffer bytes.Buffer
if err := file.Write(&buffer); err != nil {
return err
}
_, err := request.App.S3Service.PutObject(&s3.PutObjectInput{
Bucket: aws.String(request.Bucket),
Key: aws.String("__processed/" + request.Key),
ACL: aws.String("private"),
Body: bytes.NewReader(buffer.Bytes()),
ContentLength: aws.Int64(int64(len(buffer.Bytes()))),
ContentType: aws.String(http.DetectContentType(buffer.Bytes())),
ContentDisposition: aws.String("attachment"),
ServerSideEncryption: aws.String("AES256"),
})
return err
}
//importSingleSheet process a single sheet from `Import`
func importSingleSheet(request Request, file *excelize.File, sheet string) error {
surveyID := strings.ToUpper(strings.TrimSpace(file.GetCellValue(sheet, "B1")))
oldQuestions := make([]Question, 0)
newQuestions := make([]Question, 0)
//get old questions from dynamodb
if len(surveyID) > 0 {
questionsOutput, err := GetQuestions(request.App, surveyID)
if err != nil {
return err
}
oldQuestions = questionsOutput
} else {
surveyID = uuid.New().String()
}
//get new questions from excel
rows := file.GetRows(sheet)
for rowIdx, row := range rows {
//ignore headers and unrelated data. Row should start at index 2 (3rd row)
if rowIdx < 2 {
continue
}
rowQuestion := Question{
SurveyID: surveyID,
}
//iterate columns in a row
for colIdx, colValue := range row {
value := strings.TrimSpace(colValue)
switch colIdx {
case 0:
rowQuestion.QuestionText = value
case 1:
rowQuestion.Scale = strings.ToUpper(value)
case 2:
rowQuestion.QuestionID = value
}
}
if len(strings.TrimSpace(rowQuestion.QuestionID)) <= 0 {
rowQuestion.QuestionID = uuid.New().String()
}
newQuestions = append(newQuestions, rowQuestion)
}
//write new question
if err := saveNewQuestions(request, newQuestions); err != nil {
return err
}
//identify deleted questions by comparing newQuestions and oldQuestions
questionsForDeletion := registerOldQuestionsForDeletion(oldQuestions, newQuestions)
if len(questionsForDeletion) > 0 {
if err := deleteUnusedQuestions(request, questionsForDeletion); err != nil {
return err
}
}
//update the excel sheet to have _ID in it.
return updateExcelSheet(request, newQuestions, surveyID, file, sheet)
}
func updateExcelSheet(request Request, newQuestions []Question, surveyID string, file *excelize.File, sheet string) error {
file.SetCellValue(sheet, "C2", "__ID[AUTO GENERATED.DO NOT EDIT.]")
questionStartingRowIndex := 3
for i, question := range newQuestions {
file.SetCellValue(sheet, "C"+strconv.Itoa(questionStartingRowIndex+i), question.QuestionID)
}
return nil
}
func deleteUnusedQuestions(request Request, questionsForDeletion []Question) error {
writeRequests := make([]*dynamodb.WriteRequest, 0)
for _, question := range questionsForDeletion {
writeRequest, err := dynamodbattribute.MarshalMap(Question{
SurveyID: question.SurveyID,
QuestionID: question.QuestionID,
})
if err != nil {
return err
}
writeRequests = append(writeRequests, &dynamodb.WriteRequest{
DeleteRequest: &dynamodb.DeleteRequest{
Key: writeRequest,
},
})
}
return batchWriteDynamoDB(request, writeRequests)
}
func batchWriteDynamoDB(request Request, writeRequests []*dynamodb.WriteRequest) error {
writeRequestsChunks := funk.Chunk(writeRequests, 25).([][]*dynamodb.WriteRequest)
for _, chunk := range writeRequestsChunks {
output, err := request.App.DynamoService.BatchWriteItem(&dynamodb.BatchWriteItemInput{
RequestItems: map[string][]*dynamodb.WriteRequest{
os.Getenv("QUESTION_TABLE"): chunk,
},
})
if err != nil {
return err
}
if len(output.UnprocessedItems) > 0 {
return errors.New("there's an unprocessed item in batchWriteDynamoDB")
}
}
return nil
}
func saveNewQuestions(request Request, newQuestions []Question) error {
writeRequests := make([]*dynamodb.WriteRequest, 0)
for _, question := range newQuestions {
writeRequest, err := dynamodbattribute.MarshalMap(question)
if err != nil {
return err
}
writeRequests = append(writeRequests, &dynamodb.WriteRequest{
PutRequest: &dynamodb.PutRequest{
Item: writeRequest,
},
})
}
return batchWriteDynamoDB(request, funk.Shuffle(writeRequests).([]*dynamodb.WriteRequest))
}
//registerOldQuestionsForDeletion compares oldQuestions and newQuestions,
//and returns the subset of oldQuestions that no longer appears in newQuestions.
//Every item in the returned slice is absent from newQuestions and should therefore be deleted.
func registerOldQuestionsForDeletion(oldQuestions []Question, newQuestions []Question) []Question {
questionsForDeletion := make([]Question, 0)
//early termination if no old questions exist
if len(oldQuestions) <= 0 {
return questionsForDeletion
}
//generate map of new question IDs to make comparison more efficient
newQuestionIDs := make(map[string]Question)
for _, question := range newQuestions {
newQuestionIDs[question.QuestionID] = question
}
//for each old question, check whether it exists in newQuestions by comparing it to the map we just created
for _, question := range oldQuestions {
//if the old question does not exist in the new questions, register the questions for deletion.
if _, ok := newQuestionIDs[question.QuestionID]; !ok {
questionsForDeletion = append(questionsForDeletion, question)
}
}
return questionsForDeletion
}
|
[
"\"QUESTION_TABLE\"",
"\"QUESTION_TABLE\""
] |
[] |
[
"QUESTION_TABLE"
] |
[]
|
["QUESTION_TABLE"]
|
go
| 1 | 0 | |
example/object/download.go
|
package main
import (
"context"
"net/http"
"net/url"
"os"
"fmt"
"github.com/tencentyun/cos-go-sdk-v5"
"github.com/tencentyun/cos-go-sdk-v5/debug"
)
func log_status(err error) {
if err == nil {
return
}
if cos.IsNotFoundError(err) {
// WARN
fmt.Println("WARN: Resource does not exist")
} else if e, ok := cos.IsCOSError(err); ok {
fmt.Printf("ERROR: Code: %v\n", e.Code)
fmt.Printf("ERROR: Message: %v\n", e.Message)
fmt.Printf("ERROR: Resource: %v\n", e.Resource)
fmt.Printf("ERROR: RequestId: %v\n", e.RequestID)
// ERROR
} else {
fmt.Printf("ERROR: %v\n", err)
// ERROR
}
}
func main() {
u, _ := url.Parse("https://test-1259654469.cos.ap-guangzhou.myqcloud.com")
b := &cos.BaseURL{BucketURL: u}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
Transport: &debug.DebugRequestTransport{
RequestHeader: false,
RequestBody: false,
ResponseHeader: false,
ResponseBody: false,
},
},
})
opt := &cos.MultiDownloadOptions{
ThreadPoolSize: 5,
}
resp, err := c.Object.Download(
context.Background(), "test", "./test1G", opt,
)
log_status(err)
fmt.Printf("done, %v\n", resp.Header)
}
|
[
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
] |
[] |
[
"COS_SECRETKEY",
"COS_SECRETID"
] |
[]
|
["COS_SECRETKEY", "COS_SECRETID"]
|
go
| 2 | 0 | |
test/storage.go
|
package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"runtime"
"testing"
// account "github.com/cloudfly/ecenter/pkg/account"
"github.com/cloudfly/ecenter/pkg/store"
"github.com/stretchr/testify/require"
)
var (
addr = "127.0.0.1:3306"
user = "root"
password = "123456"
testDBName = "alarmd_test"
)
func init() {
if v := os.Getenv("MYSQL_PASSWORD"); v != "" {
password = v
}
if v := os.Getenv("MYSQL_USER"); v != "" {
user = v
}
if v := os.Getenv("MYSQL_ADDR"); v != "" {
addr = v
}
if v := os.Getenv("MYSQL_DATABASE"); v != "" {
testDBName = v
}
}
func ParseSQLFile(t *testing.T) map[string]string {
data := make(map[string]string)
_, filename, _, _ := runtime.Caller(0)
content, err := ioutil.ReadFile(path.Dir(filename) + "/../database.sql")
require.NoError(t, err)
for _, statement := range bytes.Split(content, []byte("\n\n")) {
if statement := bytes.TrimSpace(statement); len(statement) > 0 {
fields := bytes.Fields(statement)
tableName := bytes.Trim(fields[2], "`")
data[string(tableName)] = string(statement)
}
}
return data
}
func InitMySQL(t *testing.T) *store.MySQL {
DestroyMySQL(t)
mysql, err := store.NewMySQL(addr, user, password, testDBName)
require.NoError(t, err)
db, err := mysql.Connect(context.Background())
require.NoError(t, err)
defer db.Close()
statements := ParseSQLFile(t)
for _, statement := range statements {
_, err = db.Exec(string(statement))
require.NoError(t, err)
}
/*
ctx := context.Background()
accounts, err := account.New(mysql)
require.NoError(t, err)
_, err = accounts.AddUser(ctx, "kuwu", "苦无", "test", account.UPAdmin)
_, err = accounts.AddUser(ctx, "linglong", "玲珑", "linglong", account.DefaultUserPermission)
require.NoError(t, err)
_, err = accounts.AddGroup(ctx, "app:grail", false, "", "kuwu")
require.NoError(t, err)
_, err = accounts.AddGroup(ctx, "app:grail:operator", false, "", "kuwu")
require.NoError(t, err)
*/
return mysql
}
func DestroyMySQL(t *testing.T) {
mysql, err := store.NewMySQL(addr, user, password, testDBName)
require.NoError(t, err)
db, err := mysql.Connect(context.Background())
require.NoError(t, err)
defer db.Close()
tables := ParseSQLFile(t)
for table := range tables {
_, err = db.Exec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`", table))
require.NoError(t, err)
}
}
|
[
"\"MYSQL_PASSWORD\"",
"\"MYSQL_USER\"",
"\"MYSQL_ADDR\"",
"\"MYSQL_DATABASE\""
] |
[] |
[
"MYSQL_USER",
"MYSQL_ADDR",
"MYSQL_DATABASE",
"MYSQL_PASSWORD"
] |
[]
|
["MYSQL_USER", "MYSQL_ADDR", "MYSQL_DATABASE", "MYSQL_PASSWORD"]
|
go
| 4 | 0 | |
core/Route.go
|
package core
import (
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/zhenorzz/goploy/model"
"github.com/zhenorzz/goploy/web"
"io/fs"
"io/ioutil"
"log"
"mime"
"net/http"
"net/url"
"os"
"strconv"
"strings"
)
// Goploy callback param
type Goploy struct {
UserInfo model.User
Namespace model.Namespace
Request *http.Request
ResponseWriter http.ResponseWriter
URLQuery url.Values
Body []byte
}
// route definition
type route struct {
pattern string // URL path to match
method string // Method specifies the HTTP method (GET, POST, PUT, etc.).
roles []string // allowed roles
callback func(gp *Goploy) *Response // controller function
middlewares []func(gp *Goploy) error // middlewares
}
// Router is route slice and global middlewares
type Router struct {
whiteList map[string]struct{}
routes []route
middlewares []func(gp *Goploy) error // middlewares
}
// Start a router
func (rt *Router) Start() {
if os.Getenv("ENV") == "production" {
subFS, err := fs.Sub(web.Dist, "dist")
if err != nil {
log.Fatal(err)
}
http.Handle("/static/", http.FileServer(http.FS(subFS)))
http.Handle("/favicon.ico", http.FileServer(http.FS(subFS)))
}
http.Handle("/", rt)
}
func (rt *Router) RegisterWhiteList(whiteList map[string]struct{}) {
rt.whiteList = whiteList
}
// Add router
// pattern is the request path to match
// callback is the handler invoked when that path is requested
func (rt *Router) Add(pattern, method string, callback func(gp *Goploy) *Response, middleware ...func(gp *Goploy) error) *Router {
r := route{pattern: pattern, method: method, callback: callback}
for _, m := range middleware {
r.middlewares = append(r.middlewares, m)
}
rt.routes = append(rt.routes, r)
return rt
}
// Roles Add many permission to the route
func (rt *Router) Roles(role []string) *Router {
rt.routes[len(rt.routes)-1].roles = append(rt.routes[len(rt.routes)-1].roles, role...)
return rt
}
// Role Add permission to the route
func (rt *Router) Role(role string) *Router {
rt.routes[len(rt.routes)-1].roles = append(rt.routes[len(rt.routes)-1].roles, role)
return rt
}
// Middleware global Middleware handle function
func (rt *Router) Middleware(middleware func(gp *Goploy) error) {
rt.middlewares = append(rt.middlewares, middleware)
}
func (rt *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// If in production env, serve files from the Go server;
// otherwise they are served by the npm dev server
if os.Getenv("ENV") == "production" {
if "/" == r.URL.Path {
r, err := web.Dist.Open("dist/index.html")
if err != nil {
log.Fatal(err)
}
defer r.Close()
contents, err := ioutil.ReadAll(r)
w.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprint(w, string(contents))
return
}
}
gp, response := rt.checkLogin(w, r)
if response != nil {
response.JSON(w)
return
}
response = rt.doRequest(gp)
if response != nil {
response.JSON(w)
}
return
}
func (rt *Router) checkLogin(w http.ResponseWriter, r *http.Request) (*Goploy, *Response) {
var userInfo model.User
var namespace model.Namespace
if _, ok := rt.whiteList[r.URL.Path]; !ok {
// check token
goployTokenCookie, err := r.Cookie(LoginCookieName)
if err != nil {
return nil, &Response{Code: IllegalRequest, Message: "Illegal request"}
}
unParseToken := goployTokenCookie.Value
claims := jwt.MapClaims{}
token, err := jwt.ParseWithClaims(unParseToken, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(os.Getenv("SIGN_KEY")), nil
})
if err != nil || !token.Valid {
return nil, &Response{Code: LoginExpired, Message: "Login expired"}
}
namespaceCookie, err := r.Cookie(NamespaceCookieName)
if err != nil {
return nil, &Response{Code: IllegalRequest, Message: "Illegal namespace"}
}
namespaceID, err := strconv.ParseInt(namespaceCookie.Value, 10, 64)
if err != nil {
return nil, &Response{Code: Deny, Message: "Invalid namespace"}
}
namespaceList, err := GetNamespace(int64(claims["id"].(float64)))
if err != nil {
return nil, &Response{Code: Deny, Message: "Get namespace list error"}
}
for _, ns := range namespaceList {
if ns.ID == namespaceID {
namespace = ns
}
}
if namespace == (model.Namespace{}) {
return nil, &Response{Code: Deny, Message: "Namespace no permission"}
}
userInfo, err = GetUserInfo(int64(claims["id"].(float64)))
if err != nil {
return nil, &Response{Code: Deny, Message: "Get user information error"}
}
goployTokenStr, err := model.User{ID: int64(claims["id"].(float64)), Name: claims["name"].(string)}.CreateToken()
if err == nil {
// update jwt time
cookie := http.Cookie{Name: LoginCookieName, Value: goployTokenStr, Path: "/", MaxAge: 86400, HttpOnly: true}
http.SetCookie(w, &cookie)
}
}
// save the request body data because ioutil.ReadAll will drain the request body
var body []byte
if hasContentType(r, "application/json") {
body, _ = ioutil.ReadAll(r.Body)
}
gp := &Goploy{
UserInfo: userInfo,
Namespace: namespace,
Request: r,
ResponseWriter: w,
URLQuery: r.URL.Query(),
Body: body,
}
return gp, nil
}
func (rt *Router) doRequest(gp *Goploy) *Response {
for _, middleware := range rt.middlewares {
err := middleware(gp)
if err != nil {
return &Response{Code: Error, Message: err.Error()}
}
}
for _, route := range rt.routes {
if route.pattern == gp.Request.URL.Path {
if route.method != gp.Request.Method {
return &Response{Code: Deny, Message: "Invalid request method"}
}
if err := route.hasRole(gp.Namespace.Role); err != nil {
return &Response{Code: Deny, Message: err.Error()}
}
for _, middleware := range route.middlewares {
if err := middleware(gp); err != nil {
return &Response{Code: Error, Message: err.Error()}
}
}
return route.callback(gp)
}
}
return &Response{Code: Deny, Message: "No such method"}
}
func (r *route) hasRole(namespaceRole string) error {
if len(r.roles) == 0 {
return nil
}
for _, role := range r.roles {
if role == namespaceRole {
return nil
}
}
return errors.New("no permission")
}
func hasContentType(r *http.Request, mimetype string) bool {
contentType := r.Header.Get("Content-type")
if contentType == "" {
return false
}
for _, v := range strings.Split(contentType, ",") {
t, _, err := mime.ParseMediaType(v)
if err != nil {
break
}
if t == mimetype {
return true
}
}
return false
}
|
[
"\"ENV\"",
"\"ENV\"",
"\"SIGN_KEY\""
] |
[] |
[
"ENV",
"SIGN_KEY"
] |
[]
|
["ENV", "SIGN_KEY"]
|
go
| 2 | 0 | |
src/main/java/application/Config.java
|
package application;
public class Config {
public static final int REFRESH_INTERVAL_MINUTES = Integer.valueOf(System.getenv("REFRESH_INTERVAL_MINUTES"));
public static final int QUICK_REFRESH_INTERVAL_SECONDS = Integer.valueOf(System.getenv("QUICK_REFRESH_INTERVAL_SECONDS"));
public static final double MARGIN_ADJUSTMENT = Double.valueOf(System.getenv().getOrDefault("MARGIN_ADJUSTMENT", "0.0"));
}
|
[
"\"REFRESH_INTERVAL_MINUTES\"",
"\"QUICK_REFRESH_INTERVAL_SECONDS\""
] |
[] |
[
"QUICK_REFRESH_INTERVAL_SECONDS",
"REFRESH_INTERVAL_MINUTES"
] |
[]
|
["QUICK_REFRESH_INTERVAL_SECONDS", "REFRESH_INTERVAL_MINUTES"]
|
java
| 2 | 0 | |
cache/blobs.go
|
package cache
import (
"context"
"fmt"
"io"
"os"
"strconv"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/winlayers"
digest "github.com/opencontainers/go-digest"
imagespecidentity "github.com/opencontainers/image-spec/identity"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
var g flightcontrol.Group
const containerdUncompressed = "containerd.io/uncompressed"
var ErrNoBlobs = errors.Errorf("no blobs for snapshot")
// computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If
// a blob is missing and createIfNeeded is true, then the blob will be created, otherwise ErrNoBlobs will
// be returned. Caller must hold a lease when calling this function.
// If forceCompression is specified but the blob of compressionType doesn't exist, this function creates it.
func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, compressionType compression.Type, forceCompression bool, s session.Group) error {
if _, ok := leases.FromContext(ctx); !ok {
return errors.Errorf("missing lease requirement for computeBlobChain")
}
if err := sr.finalizeLocked(ctx); err != nil {
return err
}
if isTypeWindows(sr) {
ctx = winlayers.UseWindowsLayerMode(ctx)
}
return computeBlobChain(ctx, sr, createIfNeeded, compressionType, forceCompression, s)
}
type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error)
func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, compressionType compression.Type, forceCompression bool, s session.Group) error {
eg, ctx := errgroup.WithContext(ctx)
if sr.parent != nil {
eg.Go(func() error {
return computeBlobChain(ctx, sr.parent, createIfNeeded, compressionType, forceCompression, s)
})
}
eg.Go(func() error {
_, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (interface{}, error) {
if sr.getBlob() != "" {
return nil, nil
}
if !createIfNeeded {
return nil, errors.WithStack(ErrNoBlobs)
}
var mediaType string
var compressorFunc compressor
var finalize func(context.Context, content.Store) (map[string]string, error)
switch compressionType {
case compression.Uncompressed:
mediaType = ocispecs.MediaTypeImageLayer
case compression.Gzip:
mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.EStargz:
compressorFunc, finalize = writeEStargz()
mediaType = ocispecs.MediaTypeImageLayerGzip
default:
return nil, errors.Errorf("unknown layer compression type: %q", compressionType)
}
var lower []mount.Mount
if sr.parent != nil {
m, err := sr.parent.Mount(ctx, true, s)
if err != nil {
return nil, err
}
var release func() error
lower, release, err = m.Mount()
if err != nil {
return nil, err
}
if release != nil {
defer release()
}
}
m, err := sr.Mount(ctx, true, s)
if err != nil {
return nil, err
}
upper, release, err := m.Mount()
if err != nil {
return nil, err
}
if release != nil {
defer release()
}
var desc ocispecs.Descriptor
// Determine differ and error/log handling according to the platform, envvar and the snapshotter.
var enableOverlay, fallback, logWarnOnErr bool
if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" {
enableOverlay, err = strconv.ParseBool(forceOvlStr)
if err != nil {
return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF")
}
fallback = false // prohibit fallback on debug
} else if !isTypeWindows(sr) {
enableOverlay, fallback = true, true
switch sr.cm.ManagerOpt.Snapshotter.Name() {
case "overlayfs", "fuse-overlayfs", "stargz":
logWarnOnErr = true // snapshotter should support overlay diff. so print warn log on failure
}
}
if enableOverlay {
computed, ok, err := sr.tryComputeOverlayBlob(ctx, lower, upper, mediaType, sr.ID(), compressorFunc)
if !ok || err != nil {
if !fallback {
if !ok {
return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper)
}
if err != nil {
return nil, errors.Wrapf(err, "failed to compute overlay diff")
}
}
if logWarnOnErr {
logrus.Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err)
}
}
if ok {
desc = computed
}
}
if desc.Digest == "" {
desc, err = sr.cm.Differ.Compare(ctx, lower, upper,
diff.WithMediaType(mediaType),
diff.WithReference(sr.ID()),
diff.WithCompressor(compressorFunc),
)
if err != nil {
return nil, err
}
}
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
if finalize != nil {
a, err := finalize(ctx, sr.cm.ContentStore)
if err != nil {
return nil, errors.Wrapf(err, "failed to finalize compression")
}
for k, v := range a {
desc.Annotations[k] = v
}
}
info, err := sr.cm.ContentStore.Info(ctx, desc.Digest)
if err != nil {
return nil, err
}
if diffID, ok := info.Labels[containerdUncompressed]; ok {
desc.Annotations[containerdUncompressed] = diffID
} else if mediaType == ocispecs.MediaTypeImageLayer {
desc.Annotations[containerdUncompressed] = desc.Digest.String()
} else {
return nil, errors.Errorf("unknown layer compression type")
}
if err := sr.setBlob(ctx, compressionType, desc); err != nil {
return nil, err
}
return nil, nil
})
if err != nil {
return err
}
if forceCompression {
if err := ensureCompression(ctx, sr, compressionType, s); err != nil {
return errors.Wrapf(err, "failed to ensure compression type of %q", compressionType)
}
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
return sr.setChains(ctx)
}
// setBlob associates a blob with the cache record.
// A lease must be held for the blob when calling this function
// Caller should call Info() to learn which values are actually set
func (sr *immutableRef) setBlob(ctx context.Context, compressionType compression.Type, desc ocispecs.Descriptor) error {
if _, ok := leases.FromContext(ctx); !ok {
return errors.Errorf("missing lease requirement for setBlob")
}
diffID, err := diffIDFromDescriptor(desc)
if err != nil {
return err
}
if _, err := sr.cm.ContentStore.Info(ctx, desc.Digest); err != nil {
return err
}
if compressionType == compression.UnknownCompression {
return errors.Errorf("unhandled layer media type: %q", desc.MediaType)
}
sr.mu.Lock()
defer sr.mu.Unlock()
if sr.getBlob() != "" {
return nil
}
if err := sr.finalize(ctx); err != nil {
return err
}
if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{
ID: desc.Digest.String(),
Type: "content",
}); err != nil {
return err
}
sr.queueDiffID(diffID)
sr.queueBlob(desc.Digest)
sr.queueMediaType(desc.MediaType)
sr.queueBlobSize(desc.Size)
if err := sr.commitMetadata(); err != nil {
return err
}
if err := sr.addCompressionBlob(ctx, desc, compressionType); err != nil {
return err
}
return nil
}
func (sr *immutableRef) setChains(ctx context.Context) error {
if _, ok := leases.FromContext(ctx); !ok {
return errors.Errorf("missing lease requirement for setChains")
}
sr.mu.Lock()
defer sr.mu.Unlock()
if sr.getChainID() != "" {
return nil
}
var chainIDs []digest.Digest
var blobChainIDs []digest.Digest
if sr.parent != nil {
chainIDs = append(chainIDs, digest.Digest(sr.parent.getChainID()))
blobChainIDs = append(blobChainIDs, digest.Digest(sr.parent.getBlobChainID()))
}
diffID := digest.Digest(sr.getDiffID())
chainIDs = append(chainIDs, diffID)
blobChainIDs = append(blobChainIDs, imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID}))
chainID := imagespecidentity.ChainID(chainIDs)
blobChainID := imagespecidentity.ChainID(blobChainIDs)
sr.queueChainID(chainID)
sr.queueBlobChainID(blobChainID)
if err := sr.commitMetadata(); err != nil {
return err
}
return nil
}
func isTypeWindows(sr *immutableRef) bool {
if sr.GetLayerType() == "windows" {
return true
}
if parent := sr.parent; parent != nil {
return isTypeWindows(parent)
}
return false
}
// ensureCompression ensures the specified ref has the blob of the specified compression Type.
func ensureCompression(ctx context.Context, ref *immutableRef, compressionType compression.Type, s session.Group) error {
_, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), compressionType), func(ctx context.Context) (interface{}, error) {
desc, err := ref.ociDesc(ctx, ref.descHandlers)
if err != nil {
return nil, err
}
// Resolve converters
layerConvertFunc, err := getConverter(desc, compressionType)
if err != nil {
return nil, err
} else if layerConvertFunc == nil {
if isLazy, err := ref.isLazy(ctx); err != nil {
return nil, err
} else if isLazy {
// This ref can be used as the specified compressionType. Keep it lazy.
return nil, nil
}
return nil, ref.addCompressionBlob(ctx, desc, compressionType)
}
// First, lookup local content store
if _, err := ref.getCompressionBlob(ctx, compressionType); err == nil {
return nil, nil // found the compression variant. no need to convert.
}
// Convert layer compression type
if err := (lazyRefProvider{
ref: ref,
desc: desc,
dh: ref.descHandlers[desc.Digest],
session: s,
}).Unlazy(ctx); err != nil {
return nil, err
}
newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc)
if err != nil {
return nil, err
}
// Start to track converted layer
if err := ref.addCompressionBlob(ctx, *newDesc, compressionType); err != nil {
return nil, err
}
return nil, nil
})
return err
}
|
[
"\"BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF\""
] |
[] |
[
"BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"
] |
[]
|
["BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"]
|
go
| 1 | 0 | |
vtam/utils/RunnerFilterPCRerror.py
|
import math
import multiprocessing
import os
import pandas
import pathlib
from vtam.utils.PathManager import PathManager
from vtam.utils.RunnerVSearch import RunnerVSearch
from vtam.utils.DataframeVariant import DataframeVariant
from vtam.utils.DataframeVariantReadCountLike import DataframeVariantReadCountLike
class RunnerFilterPCRerror(object):
"""Has attributes and methods to run_name the PCR error Filter"""
def __init__(
self,
variant_expected_df,
variant_unexpected_df,
variant_read_count_df):
"""
Initiates object for the PCR error filter
:param variant_expected_df: DataFrame (id, sequence) with expected variants
:param variant_unexpected_df: DataFrame (id, sequence) with unexpected variants
:param variant_read_count_df: DataFrame (run_id, marker_id, sample_id, replicate, variant_id, read_count)
"""
self.__variant_expected_df = variant_expected_df
self.__variant_unexpected_df = variant_unexpected_df
self.__variant_read_count_df = variant_read_count_df
self.__tmp_dir = os.path.join(PathManager.instance().get_tempdir(), self.__class__.__name__)
pathlib.Path(self.__tmp_dir).mkdir(parents=True, exist_ok=True)
def get_variant_read_count_delete_df(self, pcr_error_var_prop):
variant_unexpected_to_expected_ratio_df = self.get_variant_unexpected_to_expected_ratio_df()
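        # A variant is flagged for deletion when its unexpected/expected read-count ratio
        # falls below pcr_error_var_prop for a given run/marker/sample (see the loop below)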
# Initiates filter_output_df
filter_output_df = self.__variant_read_count_df.copy()
filter_output_df['filter_delete'] = False
for row in variant_unexpected_to_expected_ratio_df.itertuples():
if float(getattr(row, 'N_ij_unexpected_to_expected_ratio')
) < pcr_error_var_prop:
filter_output_df.loc[(filter_output_df['run_id'] == row.run_id)
& (filter_output_df['marker_id'] == row.marker_id)
& (filter_output_df['sample_id'] == row.sample_id)
& (filter_output_df['variant_id'] == row.variant_id_unexpected), 'filter_delete'] = True
return filter_output_df
def get_vsearch_alignement_df(self):
"""
        This function runs vsearch to detect PCR errors (1 mismatch or gap) between the "db" and the "query" sets
        Returns: Pandas DataFrame with output of vsearch and these columns: query, target, alnlen, ids, mism, gaps
"""
# length of smallest sequence
length_min = min(
self.__variant_expected_df.sequence.apply(len).tolist() +
self.__variant_unexpected_df.sequence.apply(len).tolist())
        # calculate identity
identity = math.floor((length_min - 1) / length_min * 100) / 100
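        # e.g. length_min = 100 -> identity = 0.99, an identity threshold that still admits a single
        # mismatch/gap over the shortest sequence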
#
###################################################################
# 5-1. Make a fasta_path file with all variants of the sample or replicate
###################################################################
variant_expected_fasta_path = os.path.join(
self.__tmp_dir, '{}.fasta'.format("variant_expected"))
variant_expected_df_utils_obj = DataframeVariant(
variant_df=self.__variant_expected_df)
variant_expected_df_utils_obj.to_fasta(
fasta_path=variant_expected_fasta_path)
variant_unexpected_fasta_path = os.path.join(
self.__tmp_dir, '{}.fasta'.format("variant_unexpected"))
variant_unexpected_df_utils_obj = DataframeVariant(
variant_df=self.__variant_unexpected_df)
variant_unexpected_df_utils_obj.to_fasta(
fasta_path=variant_unexpected_fasta_path)
#
        # Create object and run vsearch
if os.getenv('VTAM_THREADS') is None:
num_threads = multiprocessing.cpu_count()
else:
num_threads = int(os.getenv('VTAM_THREADS'))
vsearch_pcr_error_tsv = os.path.join(
self.__tmp_dir, '{}.tsv'.format("vsearch_pcr_error"))
vsearch_parameters = {
'db': variant_expected_fasta_path,
'usearch_global': variant_unexpected_fasta_path,
'id': str(identity),
'maxrejects': 0,
'maxaccepts': 0,
'userout': vsearch_pcr_error_tsv,
'userfields': "query+target+alnlen+ids+mism+gaps",
'threads': num_threads,
}
vsearch_cluster = RunnerVSearch(parameters=vsearch_parameters)
vsearch_cluster.run()
column_names = [
'variant_id_unexpected',
'variant_id_expected',
'alnlen',
'ids',
'mism',
'gaps']
vsearch_alignement_df = pandas.read_csv(
vsearch_pcr_error_tsv, sep='\t', names=column_names)
return vsearch_alignement_df
def get_variant_unexpected_to_expected_ratio_df(self):
"""Creates a DF with these columns
['run_id', 'marker_id', 'sample_id', 'variant_id_expected', 'N_ij_expected', 'variant_id_unexpected',
'N_ij_unexpected', 'N_ij_unexpected_to_expected_ratio']
"""
############################################################################################
#
# Get variant pairs with 1 sequence difference (mism and/or gaps)
#
#############################################################################################
pcr_error_df = self.get_vsearch_alignement_df()
# Add up mismatch and gap
pcr_error_df[
'sum_mism_gaps'] = pcr_error_df.mism + pcr_error_df.gaps
pcr_error_df = pcr_error_df.loc[
pcr_error_df.sum_mism_gaps == 1, ['variant_id_expected', 'variant_id_unexpected']]
############################################################################################
#
# Append N_ij_expected and N_ij_unexpected
#
############################################################################################
variant_read_count_lfn_instance = DataframeVariantReadCountLike(self.__variant_read_count_df)
N_ij_df = variant_read_count_lfn_instance.get_N_ij_df()
pcr_error_df = pcr_error_df.merge(N_ij_df, left_on=['variant_id_expected'], right_on=['variant_id'])
pcr_error_df.rename(columns={'N_ij': 'N_ij_expected'}, inplace=True)
pcr_error_df.drop('variant_id', axis=1, inplace=True)
pcr_error_df = pcr_error_df.merge(N_ij_df, left_on=[
'run_id', 'marker_id', 'sample_id', 'variant_id_unexpected'], right_on=['run_id', 'marker_id', 'sample_id', 'variant_id'])
pcr_error_df.rename(columns={'N_ij': 'N_ij_unexpected'}, inplace=True)
pcr_error_df.drop('variant_id', axis=1, inplace=True)
############################################################################################
#
# Ratio variant_unexpected_to_expected_ratio_df
#
############################################################################################
# Add two column for the two expected ratio cases ratio 1 and ratio 2
pcr_error_df['N_ij_unexpected_to_expected_ratio'] = pcr_error_df['N_ij_unexpected'] \
/ pcr_error_df['N_ij_expected']
# reorder columns
pcr_error_df = pcr_error_df[[
'run_id', 'marker_id', 'sample_id', 'variant_id_expected', 'N_ij_expected',
'variant_id_unexpected', 'N_ij_unexpected', 'N_ij_unexpected_to_expected_ratio']]
return pcr_error_df
|
[] |
[] |
[
"VTAM_THREADS"
] |
[]
|
["VTAM_THREADS"]
|
python
| 1 | 0 | |
aggregator_extraction.py
|
# @ Copyright Inria, Ecole Polytechnique
# Shared under the MIT license https://opensource.org/licenses/mit-license.php
# This file contains all the functions that are used in the comparison/aggregation detection
# The main part of the code is the function find_aggregators, which is used elsewhere in the code
# The other functions are auxiliary helpers used by the main one
### IMPORT
# Python libraries import
import nltk
from nltk.parse import CoreNLPParser
from nltk.parse.corenlp import CoreNLPDependencyParser
from nltk.tag.stanford import StanfordNERTagger
import os
# Utils import
from parsing_analysis import get_nodes, get_subtrees
from utils import catch_words, cut_after, get_index
# Generic analysis functions import
from area_extraction import find_areas
from time_extraction import find_time, date_figures
### PARSERS
#Generic path
path = os.getcwd()
#Java path (to be changed)
java_path = "C:/Program Files (x86)/Java/jre1.8.0_251/bin/java.exe"
os.environ['JAVAHOME'] = java_path
#Files of the NER
jar = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/stanford-ner.jar")
model = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/classifiers/english.muc.7class.distsim.crf.ser.gz")
#Loading the parsers
parser = CoreNLPParser(url='http://localhost:9000')
dep_parser = CoreNLPDependencyParser(url='http://localhost:9000')
ner_tagger = StanfordNERTagger(model, jar, encoding='utf8')
pos_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='pos')
### FUNCTIONS
# The function cut_in_clause takes the whole sentence (as a list of tokens named tok)
# and the comparative words that have been detected so far (comp_words)
# and tries to cut the sentence into as many clauses as there are comparative words,
# so that each clause contains one and only one comparison.
# The cuts have to be made at specific words (specified in cut_words)
cut_words = ["and", "or", "but", "while"]
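# Hypothetical illustration (not part of the original code) of the intended behaviour:
#   cut_in_clause(["more", "GDP", "than", "France", "and", "less", "population", "than", "Spain"],
#                 ["more", "less"], cut_words)
#   -> ([["more", "GDP", "than", "France"], ["less", "population", "than", "Spain"]], ["and"])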
def cut_in_clause(tok, comp_words,cut_words):
res = [] #store the list of clauses
s = [] #store the current clause
cuts = [] #store the cut_words found
c = False
#We read the sentence and look for a comparative word.
#Only once found, we look for a cut word and do the cut
#Then, we start a new clause, looking again for a comp word and then a cut word
for t in tok:
if t in comp_words:
c = True
if (c and t.lower() in cut_words):
cuts.append(t.lower())
res.append(s)
s = []
c = False
else:
s.append(t)
if (s != []):
res.append(s)
return (res,cuts)
# The function get_threshold takes as input a comparative word (cp_word)
# and looks whether the word is associated with a numerical value (a threshold)
# To do that, we look at the contextual words around cp_word to find a number
# We also make sure that the number is not already tagged as a date (in date_figures)
# Finally, we check if the number is potentially linked with a unit multiplier
unit_m = {"hundred" : 100, "hundreds" : 100, "thousand" : 1000, "thousands" : 1000, "million" : 1000000, "millions" : 1000000, "billion" : 1000000000, "billions" : 1000000000,
"k" : 1000, 'm' : 1000000, "b" : 1000000000, "bn" : 1000000000, "bil" : 1000000000}
def get_threshold(tok,cp_word,date_figures):
parse = next(parser.parse(tok)) #First, we parse the whole clause
# And then we search the grammatical context of cp_word
    # This is most of the time a Prepositional Phrase (PP), a Nominal Phrase (NP) or a Quantifier Phrase (QP)
pp = None
sub = parse.subtrees()
for s in sub:
if (s.label() == "PP" and s.leaves()[0] == cp_word):
pp = s
if pp == None:
pps = get_subtrees(parse, "PP")
for p in pps:
if cp_word in p.leaves():
pp = p
if pp == None:
nps = get_subtrees(parse, "NP")
for n in nps:
if cp_word in n.leaves():
pp = n
if pp == None:
qps = get_subtrees(parse, "QP")
for q in qps:
if cp_word in q.leaves():
pp = q
#If a context is found, we look for the first number appearing after cp_word and not being a date
if pp != None:
i = get_index(pp.leaves(),cp_word) #position of the comp word in the context
fig = get_nodes(pp, "CD") #list of all numbers appearing in the context
n = 0
for f in fig:
if (n==0 and get_index(pp.leaves(),f)>i and (f not in date_figures)):
n=f
    #and if that number exists, we check if a unit multiplier is written just after
if n != 0:
k = get_index(tok, n) #position of the number in the clause
mult = 1
try:
mult = unit_m[tok[k+1].lower()]
except:
pass
return(float(n)*mult)
return None
# The function find_aggregators takes the parses of the sentence
# and tries to find every comparison and aggregation in it.
# It also takes as input the type of return the user wants (list of countries or list of years)
# and the words in the sentence giving that information
def find_aggregators(parse,parse_d,returned,agg_words):
tok = parse.leaves()
ner = ner_tagger.tag(tok)
pos = pos_tagger.tag(tok)
dep = list(dep_parser.parse(tok))[0]
# We store the numbers in the sentence that are dates, as it is useful when looking for a threshold
figures = date_figures(ner, pos, dep)
# When a comparison or aggregation is in the sentence, the user normally wants a list of something
    # But sometimes there are no words specifying the type of the list, so the return is set to a value by default
    # Here, we temporarily set that return value to a list of countries
    # This will be useful if a comparison/aggregation is found
# An example query for such a case would be "Highest GDPs in the world"
if returned == "Value":
returned = "Agr_Area"
## Comparative words
# Some comparative words are "threshold-only" and do not require a construction with "than"
th_words = ["over", "under", "below", "above"]
th_inf = ["under", "below"]
# We detect these words
th_ = catch_words(tok,th_words)
th = []
    # And just make sure that a threshold is linked to each one (as these words can appear in other contexts)
for t in th_:
if get_threshold(tok, t, figures) != None:
th.append(t)
# The other comparative words (that we will name comp words) require a structure with "than"
    # Some of them have to be specified (like "superior") but most of them are recognized easily
# thanks to specific tags for comparison in the POS tags
cp_words = ["superior", "inferior"]
cp_inf = ["less", "lower", "inferior", "poorer"]
comp_ = get_nodes(parse, "RBR") + get_nodes(parse, "JJR") + catch_words(tok, cp_words)
comp = []
# Then, we only keep the comparative words followed by a "than"
# And we also reorder the words at the same time, adding the threshold words in the common list
k = 0 #determines if a comp word has already been found (used when a "than" is found)
cp = "" #current comp word
for t in tok:
if t in comp_:
if k == 0:
k=1
cp = t
if k == 1:
cp = t
elif t in th:
if k == 1: #this case happens if a threshold word is found after a comp word but before a potential than
#in that case, we cannot reasonably consider the comp word as it would create nested comparisons
k = 0
cp = ""
comp.append(t)
elif t == "than":
if k == 0:
raise Exception("Error 0 : than alone") #in case a "than" is found but without a comp word before
elif k == 1:
k = 0
comp.append(cp)
cp = ""
## Comparisons
    # Now that we have all the comparative words, we try to cut the sentence into clauses
# Each clause must contain only one comparison (often there is just one clause)
comparisons = []
n_comp = len(comp)
clauses, cuts = cut_in_clause(tok, comp, cut_words)
if n_comp>0:
if len(clauses) == n_comp:
b = True
for i in range(n_comp):
if comp[i] not in clauses[i]:
b = False
if not b:
raise Exception("Error 1 : problem with clauses")
# Else, everything is okay and we will now treat each clause separately
else:
for i in range(n_comp):
clause = clauses[i]
word = comp[i]
# We parse the clause. That way, we only consider the words of the clause and nothing else
# And of course, the result can differ from the parsing of the whole sentence
clause_sent = " ".join(clause)
clause_parse = next(parser.parse(clause))
clause_dep = list(dep_parser.parse(clause))[0]
clause_ner = ner_tagger.tag(clause)
# Then, we execute the functions find_areas and find_time for the clause
areas = find_areas(clause_sent)
times = find_time(clause_ner, clause_parse,clause_dep)
than_time = times[2]
to_time = times[1]
in_time = times[0]
than_area = areas[2]
in_area = areas[0]
# Here, we initialize the different variables that describe a comparison
comp_type = None #what is the comparator (a threshold, another country/year, or something else)
sens = 'sup' #is the comparison a "more than" or a "less than"
V1 = {} #elements of Value1 (the first value of the comparison, before "than")
V2 = {} #elements of Value2 (the second value of the comparison, after "than")
V = {} #some elements are not part of the comparison and belongs to both values
# Example : "Countries with more population than Germany in 2010" -> we compare everything at the year 2010
# Now, we differentiate the treatment between "list of countries" and "list of years"
# Countries list
if returned == 'Agr_Area':
# If the comparative word is "threshold-only"
if word in th_words:
if word.lower() in th_inf:
sens = "inf"
# Search of a threshold
threshold = get_threshold(clause,word,[])
if threshold == None:
raise Exception("Error 2 : No threshold found")
else:
comp_type = "Threshold"
V2["THRESHOLD"] = threshold
# Search of a time indicator (as we compare values, we cannot have a time series)
if ((in_time != None) and (in_time == to_time)):
V["TIME"] = in_time
# Search of a location indicator
                            # As the user wants a list of countries, they cannot specify a country in the query
# But he can give a region ("What countries in Asia ...")
region = True
r = []
for c in in_area:
if c[1] == 'country':
region = False
if not region:
raise Exception("Error 3 : Country was mentioned")
else:
for c in in_area:
r.append(c[0])
V["AREA"] = r
# Else, the comparative word must belong to a "than" structure
else:
if 'than' in clause:
if word.lower() in cp_inf:
sens = "inf"
idx = get_index(clause, "than") #position of the "than", useful to fill V1 & V2
# First, we look at the locations
# Here, it is possible to mention a country if it is the comparator
if len(than_area) == 1:
if than_area[0][1] == "country":
V2["AREA"] = than_area[0][0]
comp_type = "Country"
else:
raise Exception("Error 4 : Comparison with a region")
elif len(than_area)>1:
raise Exception("Error 5 : Too many area mentioned")
# It is also possible to mention a region, as before
region = True
r = []
for c in in_area:
if c[1] == 'country':
region = False
if not region:
raise Exception("Error 3 : Country mentioned")
else:
for c in in_area:
r.append(c[0])
V["AREA"] = r
# Then, the time indicators
                            # If two dates are found on both sides of "than", the first one goes in V1 and the other in V2
has_than_time = False
if (len(than_time)==1):
if in_time != None:
if (get_index(clause,str(in_time)) < idx):
V1["TIME"] = in_time
V2["TIME"] = than_time[0]
has_than_time = True
if comp_type == None:
comp_type = "Two"
# Else, the year is general (goes in V)
if not has_than_time:
if len(than_time)==1:
V["TIME"] = than_time[0]
elif ((in_time != None) and (in_time == to_time)):
V["TIME"] = in_time
else: #in case no date is given, either we raise an error or ask the user, or take a default one (to see later)
#raise Exception("Error 6 : Must precise time period")
pass
# If we haven't found yet the type of comparison, we try to find a threshold
# If there is not, the comparison is of type "two" (two different values compared)
if comp_type == None:
thres = get_threshold(clause, 'than', than_time)
if thres != None:
comp_type = "Threshold"
V2["THRESHOLD"] = thres
if comp_type == None:
comp_type = "Two"
else:
raise Exception("Error 7 : comparison without 'than'")
# Years list
elif returned == 'Agr_Time':
# If threshold word
if word in th_words:
if word.lower() in th_inf:
sens = "inf"
threshold = get_threshold(clause,word,[])
if threshold == None:
raise Exception("Error 2 : No threshold found")
else:
comp_type = "Threshold"
V2["THRESHOLD"] = threshold
# As we have a list of years here, we can only have time indicators as a time period (more than one year)
if ((in_time != None) and (to_time != None) and (in_time != to_time)):
V["TIME"] = [in_time,to_time]
else:
V["TIME"] = None
# And conversely, the location indicators can only give one country (to be able to compare)
if (len(in_area) > 1 or (len(in_area) == 1 and in_area[0][1] == 'region')):
raise Exception("Error 5 : Too many area mentioned")
else:
if len(in_area) == 1:
V["AREA"] = in_area[0][0]
else:
V["AREA"] = None
# If than construction
else:
if 'than' in clause:
if word.lower() in cp_inf:
sens = "inf"
idx = get_index(clause, "than")
# Get countries
# We accept if two countries are given on both sides of "than" : goes in V1 & V2
# Else it goes in V and can only be one country
if len(than_area) == 1:
if than_area[0][1] == "country":
if (len(in_area) == 1 and in_area[0][1] == "country"):
V2["AREA"] = than_area[0][0]
V1["AREA"] = in_area[0][0]
comp_type = "Two"
elif (len(in_area) == 0):
V["AREA"] = than_area[0][0]
else:
raise Exception("Error 5 : Too many area mentioned")
else:
raise Exception("Error 4 : Comparison with a region")
elif len(than_area)>1:
raise Exception("Error 5 : Too many area mentioned")
elif (len(than_area) == 0):
if (len(in_area) > 1 or (len(in_area) == 1 and in_area[0][1] == 'region')):
raise Exception("Error 5 : Too many area mentioned")
else:
if len(in_area) == 1:
V["AREA"] = in_area[0][0]
else:
V["AREA"] = None
# Get times
#A specific year can be given by the user as the comparator (comp_type -> "Time")
if (len(than_time)==1):
V2["TIME"] = than_time[0]
comp_type = "Time"
elif(len(than_time)>1):
raise Exception("Error 8 : Too many times mentioned")
#Else, we accept only a time period
if ((in_time != None) and (to_time != None) and (in_time != to_time)):
V["TIME"] = [in_time,to_time]
else:
V["TIME"] = None
# If nothing, we do as before and look for a threshold
if comp_type == None:
thres = get_threshold(clause, 'than', than_time)
if thres != None:
comp_type = "Threshold"
V2["THRESHOLD"] = thres
if comp_type == None:
comp_type = "Two"
else:
raise Exception("Error 7 : comparison without 'than'")
# At the end, we gather everything for that clause and add this to the comparisons list
comparisons.append([comp_type,sens,V,V1,V2])
else:
raise Exception("Error 9 : number of words and clauses")
## Superlative words
# Aggregation words (or superlative words) are mostly found with their specific tag
# Nonetheless, some have to be specified
sp_words = ["top", "minimum", "maximum"]
sup = get_nodes(parse, "RBS") + get_nodes(parse, "JJS") + catch_words(tok, sp_words)
## Aggregations
aggreg = None
sens_sup = None #sense of the aggregation (max or min)
n_sup = 1 #number of items to display
sup_neg = ["least", "lowest", "worst", "minimum"]
#we also need to know the plural form of the words that could be linked to the aggregation
agg_plural = ["areas", "countries", "places", "states", "nations", "years"]
#Sense of the aggregation
if (sup != []):
for s in sup:
if s.lower() in sup_neg:
sens_sup = 'inf'
if sens_sup == None:
sens_sup = 'sup'
# For the number of items, we look at the context of the superlative words + the words linked to them
# These words usually form a context as a Nominal Phrase (NP)
# And in the context, we look for numerical values
sup_ = sup + agg_words
nps = get_subtrees(parse, "NP")
for s in sup_:
for np in nps:
if s in np.leaves():
for a in np.leaves():
try:
n_sup = int(a)
except:
pass
# If no number was found, we look at a potential plural form
# That would correspond to a default value of 10 items
if n_sup == 1:
for w in agg_words:
if w.lower() in agg_plural:
n_sup = 10
if (sup != []):
aggreg = [sens_sup,n_sup]
#Finally, we return all the information found
# 1) The list of comparison (one for each clause)
# 2) The sense and value of the aggregation (if any)
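    # Each comparison has the form [comp_type, sens, V, V1, V2]; aggreg is [sens_sup, n_sup] or None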
return(comparisons,aggreg)
|
[] |
[] |
[
"JAVAHOME"
] |
[]
|
["JAVAHOME"]
|
python
| 1 | 0 | |
StudentiUniMi/wsgi.py
|
"""
WSGI config for StudentiUniMi project.
It exposes the WSGI callable as a module-level variable named ``application``.
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StudentiUniMi.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
lepton/azure_storage.go
|
package lepton
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/url"
"os"
"os/exec"
"path"
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
)
// AzureStorage provides Azure storage related operations
type AzureStorage struct{}
type qemuInfo struct {
VirtualSize uint32 `json:"virtual-size"`
Filename string `json:"filename"`
Format string `json:"format"`
ActualSize uint32 `json:"actual-size"`
DirtyFlag bool `json:"dirty-flag"`
}
const (
onemb = 1048576
)
func roundup(x, y uint32) uint32 {
n := (x + y - 1) / y
return (n * onemb)
}
func (az *AzureStorage) resizeLength(virtSz uint32) uint32 {
var azureMin uint32 = 20971520 // min disk sz
var max uint32
if azureMin > virtSz {
max = azureMin
} else {
max = virtSz
}
return roundup(max, onemb)
}
// might have to adjust this if disk sz is really large/overflows
func (az *AzureStorage) virtualSize(archPath string) uint32 {
args := []string{
"info", "-f", "raw",
"--output", "json", archPath,
}
cmd := exec.Command("qemu-img", args...)
out, err := cmd.Output()
if err != nil {
fmt.Println(err)
}
qi := &qemuInfo{}
err = json.Unmarshal([]byte(out), qi)
return qi.VirtualSize
}
func (az *AzureStorage) resizeImage(basePath string, newPath string, resizeSz uint32) {
in, err := os.Open(basePath)
if err != nil {
fmt.Println(err)
}
defer in.Close()
out, err := os.Create(newPath)
if err != nil {
fmt.Println(err)
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
fmt.Println(err)
}
szstr := fmt.Sprint(resizeSz)
args := []string{
"resize", "-f", "raw",
newPath, szstr,
}
cmd := exec.Command("qemu-img", args...)
_, err = cmd.Output()
if err != nil {
fmt.Println(err)
}
}
// CopyToBucket copies archive to bucket
func (az *AzureStorage) CopyToBucket(config *Config, archPath string) error {
// not sure why this is necessary - afaik only gcp does the tarball
// uploads
base := config.CloudConfig.ImageName + ".img"
opshome := GetOpsHome()
imgpath := path.Join(opshome, "images", base)
imgpath = strings.ReplaceAll(imgpath, "-image", "")
// get virtual size
vs := az.virtualSize(imgpath)
rs := az.resizeLength(vs)
debug := false
if debug {
fmt.Printf("virt sz: %d\n", vs)
fmt.Printf("resize sz: %d\n", rs)
}
newpath := "/tmp/" + base
newpath = strings.ReplaceAll(newpath, "-image", "")
// resize
az.resizeImage(imgpath, newpath, rs)
// convert
vhdPath := "/tmp/" + config.CloudConfig.ImageName + ".vhd"
vhdPath = strings.ReplaceAll(vhdPath, "-image", "")
// this is probably just for hyper-v not azure
args := []string{
"convert", "-f", "raw",
"-O", "vpc", "-o", "subformat=fixed,force_size",
newpath, vhdPath,
}
cmd := exec.Command("qemu-img", args...)
err := cmd.Run()
if err != nil {
fmt.Println(err)
}
accountName, accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT"), os.Getenv("AZURE_STORAGE_ACCESS_KEY")
if len(accountName) == 0 || len(accountKey) == 0 {
log.Fatal("Either the AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY environment variable is not set")
}
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
log.Fatal("Invalid credentials with error: " + err.Error())
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
containerName := "quickstart-nanos"
URL, _ := url.Parse(
fmt.Sprintf("https://%s.blob.core.windows.net/%s", accountName, containerName))
containerURL := azblob.NewContainerURL(*URL, p)
// we can skip over this if it already exists
fmt.Printf("Creating a container named %s\n", containerName)
ctx := context.Background()
_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
fmt.Println(err)
}
blobURL := containerURL.NewPageBlobURL(config.CloudConfig.ImageName + ".vhd")
file, err := os.Open(vhdPath)
if err != nil {
fmt.Println(err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
fmt.Println(err)
}
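	// Upload the VHD as an Azure page blob in fixed 4 MiB chunks; q is the number of chunks,
	// rounded up when the file size is not a multiple of 4 MiB.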
max := 4194304
length := fi.Size()
ilength := int(length)
q, r := int(ilength/max), ilength%max
if r != 0 {
q++
}
_, err = blobURL.Create(ctx, length, 0, azblob.BlobHTTPHeaders{},
azblob.Metadata{}, azblob.BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
for i := 0; i < q; i++ {
page := make([]byte, max)
n, err := file.Read(page)
_, err = blobURL.UploadPages(ctx, int64(i*max), bytes.NewReader(page[:n]), azblob.PageBlobAccessConditions{}, nil)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
return nil
}
// DeleteFromBucket deletes key from config's bucket
func (az *AzureStorage) DeleteFromBucket(config *Config, key string) error {
fmt.Println("un-implemented")
return nil
}
|
[
"\"AZURE_STORAGE_ACCOUNT\"",
"\"AZURE_STORAGE_ACCESS_KEY\""
] |
[] |
[
"AZURE_STORAGE_ACCESS_KEY",
"AZURE_STORAGE_ACCOUNT"
] |
[]
|
["AZURE_STORAGE_ACCESS_KEY", "AZURE_STORAGE_ACCOUNT"]
|
go
| 2 | 0 | |
samples/snippets/create_data_labeling_job_image_segmentation_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import uuid
from google.cloud import aiplatform
import helpers
import create_data_labeling_job_image_segmentation_sample
API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT")
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
DATASET_ID = "5111009432972558336"
INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_segmentation_1.0.0.yaml"
DISPLAY_NAME = f"temp_create_data_labeling_job_image_segmentation_test_{uuid.uuid4()}"
INSTRUCTIONS_GCS_URI = (
"gs://ucaip-sample-resources/images/datalabeling_instructions.pdf"
)
ANNOTATION_SPEC = {"color": {"red": 1.0}, "displayName": "rose"}
ANNOTATION_SET_NAME = f"temp_image_segmentation_{uuid.uuid4()}"
@pytest.fixture
def shared_state():
state = {}
yield state
@pytest.fixture
def job_client():
client_options = {"api_endpoint": API_ENDPOINT}
job_client = aiplatform.gapic.JobServiceClient(client_options=client_options)
yield job_client
@pytest.fixture(scope="function", autouse=True)
def teardown(capsys, shared_state, job_client):
yield
job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"])
# Verify Data Labelling Job is cancelled, or timeout after 400 seconds
helpers.wait_for_job_state(
get_job_method=job_client.get_data_labeling_job,
name=shared_state["data_labeling_job_name"],
timeout=400,
freq=10,
)
# Delete the data labeling job
response = job_client.delete_data_labeling_job(
name=shared_state["data_labeling_job_name"]
)
print("Delete LRO:", response.operation.name)
delete_data_labeling_job_response = response.result(timeout=300)
print("delete_data_labeling_job_response", delete_data_labeling_job_response)
out, _ = capsys.readouterr()
assert "delete_data_labeling_job_response" in out
# Creating a data labeling job for images
def test_create_data_labeling_job_image_segmentation_sample(capsys, shared_state):
dataset = f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}"
create_data_labeling_job_image_segmentation_sample.create_data_labeling_job_image_segmentation_sample(
project=PROJECT_ID,
display_name=DISPLAY_NAME,
dataset=dataset,
instruction_uri=INSTRUCTIONS_GCS_URI,
inputs_schema_uri=INPUTS_SCHEMA_URI,
annotation_spec=ANNOTATION_SPEC,
annotation_set_name=ANNOTATION_SET_NAME,
api_endpoint=API_ENDPOINT,
)
out, _ = capsys.readouterr()
    # Save resource name of the newly created data labeling job
shared_state["data_labeling_job_name"] = helpers.get_name(out)
|
[] |
[] |
[
"DATA_LABELING_API_ENDPOINT",
"BUILD_SPECIFIC_GCLOUD_PROJECT"
] |
[]
|
["DATA_LABELING_API_ENDPOINT", "BUILD_SPECIFIC_GCLOUD_PROJECT"]
|
python
| 2 | 0 | |
conftest.py
|
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# This software is distributed under the terms of the MIT License.
#
"""
Fixtures for our tests.
"""
import functools
import os
import pathlib
import re
import subprocess
import tempfile
import textwrap
import typing
from doctest import ELLIPSIS
from unittest.mock import MagicMock
import pytest
import pydsdl
from sybil import Sybil
from sybil.parsers.codeblock import CodeBlockParser
from sybil.parsers.doctest import DocTestParser
from nunavut import Namespace
from nunavut.jinja.jinja2 import DictLoader
from nunavut.lang import LanguageContext
from nunavut.templates import (CONTEXT_FILTER_ATTRIBUTE_NAME,
ENVIRONMENT_FILTER_ATTRIBUTE_NAME,
LANGUAGE_FILTER_ATTRIBUTE_NAME)
@pytest.fixture
def run_nnvg(request): # type: ignore
def _run_nnvg(gen_paths: typing.Any,
args: typing.List[str],
check_result: bool = True,
env: typing.Optional[typing.Dict[str, str]] = None) -> subprocess.CompletedProcess:
"""
Helper to invoke nnvg for unit testing within the proper python coverage wrapper.
"""
coverage_args = ['coverage', 'run', '--parallel-mode', '-m', 'nunavut']
this_env = os.environ.copy()
if env is not None:
this_env.update(env)
return subprocess.run(coverage_args + args,
check=check_result,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=this_env)
return _run_nnvg
class GenTestPaths:
"""Helper to generate common paths used in our unit tests."""
def __init__(self, test_file: str, keep_temporaries: bool, node_name: str):
test_file_path = pathlib.Path(test_file)
self.test_name = '{}_{}'.format(test_file_path.parent.stem, node_name)
self.test_dir = test_file_path.parent
search_dir = self.test_dir.resolve()
while search_dir.is_dir() and not (search_dir / pathlib.Path('src')).is_dir():
search_dir = search_dir.parent
self.root_dir = search_dir
self.templates_dir = self.test_dir / pathlib.Path('templates')
self.dsdl_dir = self.test_dir / pathlib.Path('dsdl')
self._keep_temp = keep_temporaries
self._out_dir = None # type: typing.Optional[typing.Any]
self._build_dir = None # type: typing.Optional[pathlib.Path]
self._dsdl_dir = None # type: typing.Optional[pathlib.Path]
print('Paths for test "{}" under dir {}'.format(self.test_name, self.test_dir))
print('(root directory: {})'.format(self.root_dir))
@property
def out_dir(self) -> pathlib.Path:
"""
The directory to place test output under for this test case.
"""
if self._out_dir is None:
if self._keep_temp:
self._out_dir = lambda: None
test_output_dir = self._ensure_dir(self.build_dir / pathlib.Path(self.test_name))
setattr(self._out_dir, 'name', str(test_output_dir))
else:
self._out_dir = tempfile.TemporaryDirectory(dir=str(self.build_dir))
return pathlib.Path(self._out_dir.name)
@property
def build_dir(self) -> pathlib.Path:
if self._build_dir is None:
self._build_dir = self._ensure_dir(self.root_dir / pathlib.Path('build'))
return self._build_dir
@staticmethod
def find_outfile_in_namespace(typename: str,
namespace: Namespace,
type_version: pydsdl.Version = None) \
-> typing.Optional[str]:
found_outfile = None # type: typing.Optional[str]
for dsdl_type, outfile in namespace.get_all_types():
if dsdl_type.full_name == typename:
if type_version is not None:
if isinstance(dsdl_type, pydsdl.CompositeType) and type_version == dsdl_type.version:
found_outfile = str(outfile)
break
# else ignore this since it's either a namespace or it's not the version
# of the type we're looking for.
elif found_outfile is not None:
raise RuntimeError('Type {} had more than one version for this test but no type version argument was'
' provided.'.format(typename))
else:
found_outfile = str(outfile)
return found_outfile
@staticmethod
def _ensure_dir(path_dir: pathlib.Path) -> pathlib.Path:
try:
path_dir.mkdir()
except FileExistsError:
pass
if not path_dir.exists() or not path_dir.is_dir():
raise RuntimeWarning('Test directory "{}" was not setup properly. Tests may fail.'.format(path_dir))
return path_dir
@pytest.fixture(scope='function')
def gen_paths(request): # type: ignore
return GenTestPaths(str(request.fspath), request.config.option.keep_generated, request.node.name)
def pytest_addoption(parser): # type: ignore
parser.addoption("--keep-generated", action="store_true", help=textwrap.dedent('''
If set then the temporary directory used to generate files for each test will be left after
the test has completed. Normally this directory is temporary and therefore cleaned up automatically.
:: WARNING ::
This will leave orphaned files on disk. They won't be big but there will be a lot of them.
:: WARNING ::
Do not run tests in parallel when using this option.
'''))
class _UniqueNameEvaluator:
def __init__(self) -> None:
self._found_names = set() # type: typing.Set[str]
def __call__(self, expected_pattern: str, actual_value: str) -> None:
assert re.match(expected_pattern, actual_value) is not None
assert actual_value not in self._found_names
self._found_names.add(actual_value)
@pytest.fixture(scope='function')
def unique_name_evaluator(request): # type: ignore
"""
    Class that defines ``assert_is_expected_and_unique`` allowing assertion that a set of values
    in a single test adhere to a provided pattern and are unique values (compared to other values
provided to this method).
.. code-block:: python
def test_is_unique(unique_name_evaluator) -> None:
value0 = '_foo0_'
value1 = '_foo1_'
unique_name_evaluator(r'_foo\\d_', value0)
unique_name_evaluator(r'_foo\\d_', value1)
# This next line should fail because value 0 was already evaluated so it
# is not unique
unique_name_evaluator(r'_foo\\d_', value0)
"""
return _UniqueNameEvaluator()
@pytest.fixture
def assert_language_config_value(request): # type: ignore
"""
Assert that a given configuration value is set for the target language.
"""
def _assert_language_config_value(target_language: typing.Union[typing.Optional[str], LanguageContext],
key: str,
expected_value: typing.Any,
message: typing.Optional[str]) -> None:
if isinstance(target_language, LanguageContext):
lctx = target_language
else:
lctx = LanguageContext(target_language)
language = lctx.get_target_language()
if language is None:
raise AssertionError('Unable to determine target language from provided arguments.')
if expected_value != language.get_config_value(key):
raise AssertionError(message)
return _assert_language_config_value
@pytest.fixture
def configurable_language_context_factory(request): # type: ignore
"""
Use to create a LanguageContext that the test can write configuration overrides for.
Example:
.. code-block:: python
def test_my_test(configurable_language_context_factory):
lctx = configurable_language_context_factory({'nunavut.lang.c': {'foo': 'bar'}},
'c')
assert lctx.get_target_language().get_config_value('foo') == 'bar'
.. invisible-code-block: python
test_my_test(configurable_language_context_factory)
"""
def _make_configurable_language_context(config_overrides: typing.Mapping[str, typing.Mapping[str, typing.Any]],
target_language: typing.Optional[str] = None,
extension: typing.Optional[str] = None,
namespace_output_stem: typing.Optional[str] = None,
omit_serialization_support_for_target: bool = True) \
-> LanguageContext:
from tempfile import NamedTemporaryFile
config_bytes = [] # type: typing.List[bytearray]
def _config_gen(indent: int,
key: str,
value: typing.Union[typing.Dict, typing.Any],
out_config_bytes: typing.List[bytearray]) \
-> None:
line = bytearray('{}{} = '.format(' ' * indent, key), 'utf8')
if isinstance(value, dict):
line += bytearray('\n', 'utf8')
out_config_bytes.append(line)
for subkey, subvalue in value.items():
_config_gen(indent + 1, subkey, subvalue, out_config_bytes)
else:
line += bytearray('{}\n'.format(str(value)), 'utf8')
out_config_bytes.append(line)
for section, config in config_overrides.items():
config_bytes.append(bytearray('[{}]\n'.format(section), 'utf8'))
for key, value in config.items():
_config_gen(0, key, value, config_bytes)
with NamedTemporaryFile() as config_override_file:
config_override_file.writelines(config_bytes)
config_override_file.flush()
return LanguageContext(target_language, extension,
additional_config_files=[pathlib.Path(config_override_file.name)])
return _make_configurable_language_context
@pytest.fixture
def jinja_filter_tester(request): # type: ignore
"""
Use to create fluent but testable documentation for Jinja filters.
Example:
.. code-block: python
from nunavut.templates import template_environment_filter
@template_environment_filter
def filter_dummy(env, input):
return input
# Given
I = 'foo'
# and
template = '{{ I | dummy }}'
# then
rendered = I
jinja_filter_tester(filter_dummy, template, rendered, 'c', I=I)
You can also control the language context:
.. code-block: python
lctx = configurable_language_context_factory({'nunavut.lang.c': {'enable_stropping': False}}, 'c')
jinja_filter_tester(filter_dummy, template, rendered, lctx, I=I)
"""
def _make_filter_test_template(filter_or_list: typing.Union[typing.Callable, typing.List[typing.Callable]],
body: str,
expected: str,
target_language_or_language_context: typing.Union[typing.Optional[str], LanguageContext],
**globals: typing.Optional[typing.Dict[str, typing.Any]]) -> str:
from nunavut.jinja import CodeGenEnvironment
e = CodeGenEnvironment(loader=DictLoader({'test': body}))
if globals is not None:
e.globals.update(globals)
if isinstance(target_language_or_language_context, LanguageContext):
lctx = target_language_or_language_context
else:
lctx = LanguageContext(target_language_or_language_context)
filters = (filter_or_list if isinstance(filter_or_list, list) else [filter_or_list])
for filter in filters:
filter_name = filter.__name__[7:]
if hasattr(filter, ENVIRONMENT_FILTER_ATTRIBUTE_NAME) and \
getattr(filter, ENVIRONMENT_FILTER_ATTRIBUTE_NAME):
e.filters[filter_name] = functools.partial(filter, e)
else:
e.filters[filter_name] = filter
if hasattr(filter, CONTEXT_FILTER_ATTRIBUTE_NAME) and getattr(filter, CONTEXT_FILTER_ATTRIBUTE_NAME):
context = MagicMock()
e.filters[filter_name] = functools.partial(filter, context)
else:
e.filters[filter_name] = filter
if hasattr(filter, LANGUAGE_FILTER_ATTRIBUTE_NAME):
language_name = getattr(filter, LANGUAGE_FILTER_ATTRIBUTE_NAME)
e.filters[filter_name] = functools.partial(filter, lctx.get_language(language_name))
else:
e.filters[filter_name] = filter
target_language_resolved = lctx.get_target_language()
if target_language_resolved is not None:
e.globals.update(target_language_resolved.get_globals())
rendered = str(e.get_template('test').render())
if expected != rendered:
msg = 'Unexpected template output\n\texpected : {}\n\twas : {}'.format(
expected.replace('\n', '\\n'), rendered.replace('\n', '\\n'))
raise AssertionError(msg)
return rendered
return _make_filter_test_template
_sy = Sybil(
parsers=[
DocTestParser(optionflags=ELLIPSIS),
CodeBlockParser(),
],
pattern='**/*',
excludes=[
'**/markupsafe/*',
'**/jinja2/*',
'**/static/*',
'**/.*/*',
'**/.*',
'**/CONTRIBUTING.rst',
'**/verification/*',
'**/prof/*'
],
fixtures=['jinja_filter_tester',
'gen_paths',
'assert_language_config_value',
'configurable_language_context_factory']
)
pytest_collect_file = _sy.pytest()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/moby/buildkit/util/progress/logs/logs.go
|
package logs
import (
"context"
"fmt"
"io"
"math"
"os"
"strconv"
"sync"
"time"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/progress"
"github.com/pkg/errors"
"github.com/tonistiigi/units"
)
var defaultMaxLogSize = 1024 * 1024
var defaultMaxLogSpeed = 100 * 1024 // per second
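// Both defaults can be overridden, once, via the BUILDKIT_STEP_LOG_MAX_SIZE and
// BUILDKIT_STEP_LOG_MAX_SPEED environment variables (parsed in checkLimit below).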
var configCheckOnce sync.Once
func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) {
return newStreamWriter(ctx, 1, printOutput), newStreamWriter(ctx, 2, printOutput)
}
func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.WriteCloser {
pw, _, _ := progress.FromContext(ctx)
return &streamWriter{
pw: pw,
stream: stream,
printOutput: printOutput,
created: time.Now(),
}
}
type streamWriter struct {
pw progress.Writer
stream int
printOutput bool
created time.Time
size int
clipping bool
clipReasonSpeed bool
}
func (sw *streamWriter) checkLimit(n int) int {
configCheckOnce.Do(func() {
maxLogSize, err := strconv.ParseInt(os.Getenv("BUILDKIT_STEP_LOG_MAX_SIZE"), 10, 32)
if err == nil {
defaultMaxLogSize = int(maxLogSize)
}
maxLogSpeed, err := strconv.ParseInt(os.Getenv("BUILDKIT_STEP_LOG_MAX_SPEED"), 10, 32)
if err == nil {
defaultMaxLogSpeed = int(maxLogSpeed)
}
})
oldSize := sw.size
sw.size += n
maxSize := -1
if defaultMaxLogSpeed != -1 {
maxSize = int(math.Ceil(time.Since(sw.created).Seconds())) * defaultMaxLogSpeed
sw.clipReasonSpeed = true
}
if maxSize > defaultMaxLogSize {
maxSize = defaultMaxLogSize
sw.clipReasonSpeed = false
}
if maxSize < oldSize {
return 0
}
if maxSize != -1 {
if sw.size > maxSize {
return maxSize - oldSize
}
}
return n
}
func (sw *streamWriter) clipLimitMessage() string {
if sw.clipReasonSpeed {
return fmt.Sprintf("%#g/s", units.Bytes(defaultMaxLogSpeed))
}
return fmt.Sprintf("%#g", units.Bytes(defaultMaxLogSize))
}
func (sw *streamWriter) Write(dt []byte) (int, error) {
oldSize := len(dt)
dt = append([]byte{}, dt[:sw.checkLimit(len(dt))]...)
if sw.clipping && oldSize == len(dt) {
sw.clipping = false
}
if !sw.clipping && oldSize != len(dt) {
dt = append(dt, []byte(fmt.Sprintf("\n[output clipped, log limit %s reached]\n", sw.clipLimitMessage()))...)
sw.clipping = true
}
if len(dt) != 0 {
sw.pw.Write(identity.NewID(), client.VertexLog{
Stream: sw.stream,
Data: dt,
})
if sw.printOutput {
switch sw.stream {
case 1:
return os.Stdout.Write(dt)
case 2:
return os.Stderr.Write(dt)
default:
return 0, errors.Errorf("invalid stream %d", sw.stream)
}
}
}
return oldSize, nil
}
func (sw *streamWriter) Close() error {
return sw.pw.Close()
}
|
[
"\"BUILDKIT_STEP_LOG_MAX_SIZE\"",
"\"BUILDKIT_STEP_LOG_MAX_SPEED\""
] |
[] |
[
"BUILDKIT_STEP_LOG_MAX_SPEED",
"BUILDKIT_STEP_LOG_MAX_SIZE"
] |
[]
|
["BUILDKIT_STEP_LOG_MAX_SPEED", "BUILDKIT_STEP_LOG_MAX_SIZE"]
|
go
| 2 | 0 | |
kolibri/core/apps.py
|
from __future__ import unicode_literals
import logging
import os
from django.apps import AppConfig
from django.db.backends.signals import connection_created
from kolibri.core.sqlite.pragmas import CONNECTION_PRAGMAS
from kolibri.core.sqlite.pragmas import START_PRAGMAS
logger = logging.getLogger(__name__)
class KolibriCoreConfig(AppConfig):
name = 'kolibri.core'
def ready(self):
"""
Sets up PRAGMAs.
"""
connection_created.connect(self.activate_pragmas_per_connection)
self.activate_pragmas_on_start()
# Log the settings file that we are running Kolibri with.
# Do this logging here, as this will be after Django has done its processing of
# Any environment variables or --settings command line arguments.
logger.info("Running Kolibri with the following settings: {settings}".format(
settings=os.environ["DJANGO_SETTINGS_MODULE"]))
@staticmethod
def activate_pragmas_per_connection(sender, connection, **kwargs):
"""
Activate SQLite3 PRAGMAs that apply on a per-connection basis. A no-op
right now, but kept around as infrastructure if we ever want to add
PRAGMAs in the future.
"""
if connection.vendor == "sqlite":
cursor = connection.cursor()
# Shorten the default WAL autocheckpoint from 1000 pages to 500
cursor.execute(CONNECTION_PRAGMAS)
# We don't turn on the following pragmas, because they have negligible
# performance impact. For reference, here's what we've tested:
# Don't ensure that the OS has fully flushed
# our data to disk.
# cursor.execute("PRAGMA synchronous=OFF;")
# Store cross-database JOINs in memory.
# cursor.execute("PRAGMA temp_store=MEMORY;")
@staticmethod
def activate_pragmas_on_start():
"""
Activate a set of PRAGMAs that apply to the database itself,
and not on a per connection basis.
:return:
"""
from django.db import connection
if connection.vendor == "sqlite":
cursor = connection.cursor()
# http://www.sqlite.org/wal.html
# WAL's main advantage allows simultaneous reads
# and writes (vs. the default exclusive write lock)
# at the cost of a slight penalty to all reads.
cursor.execute(START_PRAGMAS)
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
yolo3_video.py
|
import argparse
import os
import numpy as np
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
import struct
import cv2
import time
from pathlib import Path
#np.set_printoptions(threshold=np.nan)
np.set_printoptions(threshold=30)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
argparser = argparse.ArgumentParser(
description='test yolov3 network with coco weights')
argparser.add_argument(
'-w',
'--weights',
help='path to weights file')
argparser.add_argument(
'-v',
'--video',
help='path to video file')
class WeightReader:
def __init__(self, weight_file):
with open(weight_file, 'rb') as w_f:
major, = struct.unpack('i', w_f.read(4))
minor, = struct.unpack('i', w_f.read(4))
revision, = struct.unpack('i', w_f.read(4))
if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
w_f.read(8)
else:
w_f.read(4)
transpose = (major > 1000) or (minor > 1000)
binary = w_f.read()
self.offset = 0
self.all_weights = np.frombuffer(binary, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset-size:self.offset]
def load_weights(self, model):
for i in range(106):
try:
conv_layer = model.get_layer('conv_' + str(i))
print("loading weights of convolution #" + str(i))
if i not in [81, 93, 105]:
norm_layer = model.get_layer('bnorm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = self.read_bytes(size) # bias
gamma = self.read_bytes(size) # scale
mean = self.read_bytes(size) # mean
var = self.read_bytes(size) # variance
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = self.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel, bias])
else:
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel])
except ValueError:
print("no convolution #" + str(i))
def reset(self):
self.offset = 0
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefers left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefers left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
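    # Worked example: two 1x1 boxes offset by half a width overlap on a 0.5 x 1 region,
    # so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.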
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
def preprocess_input(image, net_h, net_w):
#new_h, new_w, _ = image.shape
new_h = 480
new_w = 640
# determine the new size of the image
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)/new_w
new_w = net_w
else:
new_w = (new_w * net_h)/new_h
new_h = net_h
# resize the image to the new size
resized = cv2.resize(image[:,:,::-1]/255., (int(new_w), int(new_h)))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[int((net_h-new_h)//2):int((net_h+new_h)//2), int((net_w-new_w)//2):int((net_w+new_w)//2), :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
def decode_netout(netout, anchors, obj_thresh, nms_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
#objectness = netout[..., :4]
if(objectness.all() <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
#box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, None, classes)
boxes.append(box)
return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
        new_h = net_h
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def draw_boxes(image, boxes, labels, obj_thresh):
#highest_conf_label = ''
#highest_conf = 0
for box in boxes:
label_str = ''
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
label_str += labels[i]
label = i
print(labels[i] + ': ' + str(box.classes[i]*100) + '%')
#if box.classes[i] > highest_conf:
# highest_conf = box.classes[i]
# highest_conf_label = labels[i]
if label >= 0:
cv2.rectangle(image, (box.xmin,box.ymin), (box.xmax,box.ymax), (0,255,0), 3)
#print(type(box.get_score()))
#print(np.format_float_positional(box.get_score(), precision=2))
cv2.putText(image,
label_str + ' ' + str(np.format_float_positional(box.get_score(), precision=2)),
(box.xmin, box.ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
(0,255,0), 2)
return image
def _main_(args):
weights_path = args.weights
video_path = args.video
# set some parameters
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.65, 0.45
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
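    # One anchor group per output scale: the largest anchors are applied to the coarsest
    # 13x13 grid (yolo_82) and the smallest to the finest 52x52 grid (yolo_106), for a
    # 416x416 network input.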
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# make the yolov3 model to predict 80 classes on COCO
yolov3 = make_yolov3_model()
# load the weights trained on COCO into the model
weight_reader = WeightReader(weights_path)
weight_reader.load_weights(yolov3)
'''
# set webcam
cap = cv2.VideoCapture(1)
while(True):
ret, image = cap.read()
#image_h, image_w, _ = image.shape
image_w = cap.get(3)
image_h = cap.get(4)
if cv2.waitKey(1) & 0xFF == ord(' '):
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
do_nms(boxes, nms_thresh)
draw_boxes_play_music(image, boxes, labels, obj_thresh)
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
'''
# preprocess the video
cap = cv2.VideoCapture(video_path)
print("open video file from", video_path)
if Path(video_path).is_file():
print("Video file exists")
else:
print("cannot find video file")
print(cap.isOpened())
while(cap.isOpened()):
        ret, image = cap.read()
        if not ret:
            # stop when the video ends or a frame cannot be read
            break
image_w = cap.get(3)
image_h = cap.get(4)
image = cv2.flip(image, 0)
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# draw bounding boxes on the image using labels
draw_boxes(image, boxes, labels, obj_thresh)
# write the image with bounding boxes to video
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
args = argparser.parse_args()
_main_(args)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
src/main/resources/external/src/ddmlib/src/main/java/com/android/ddmlib/Device.java
|
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.ddmlib;
import com.android.annotations.NonNull;
import com.android.annotations.Nullable;
import com.android.annotations.VisibleForTesting;
import com.android.annotations.concurrency.GuardedBy;
import com.android.ddmlib.log.LogReceiver;
import com.google.common.base.CharMatcher;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Atomics;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* A Device. It can be a physical device or an emulator.
*/
final class Device implements IDevice {
/** Emulator Serial Number regexp. */
static final String RE_EMULATOR_SN = "emulator-(\\d+)"; //$NON-NLS-1$
/** Serial number of the device */
private final String mSerialNumber;
/** Name of the AVD */
private String mAvdName = null;
/** State of the device. */
private DeviceState mState = null;
/** Device properties. */
private final PropertyFetcher mPropFetcher = new PropertyFetcher(this);
private final Map<String, String> mMountPoints = new HashMap<String, String>();
private final BatteryFetcher mBatteryFetcher = new BatteryFetcher(this);
@GuardedBy("mClients")
private final List<Client> mClients = new ArrayList<Client>();
/** Maps pid's of clients in {@link #mClients} to their package name. */
private final Map<Integer, String> mClientInfo = new ConcurrentHashMap<Integer, String>();
private DeviceMonitor mMonitor;
private static final String LOG_TAG = "Device";
private static final char SEPARATOR = '-';
private static final String UNKNOWN_PACKAGE = ""; //$NON-NLS-1$
private static final long GET_PROP_TIMEOUT_MS = 100;
private static final long INSTALL_TIMEOUT_MINUTES;
static {
String installTimeout = System.getenv("ADB_INSTALL_TIMEOUT");
long time = 4;
if (installTimeout != null) {
try {
time = Long.parseLong(installTimeout);
} catch (NumberFormatException e) {
// use default value
}
}
INSTALL_TIMEOUT_MINUTES = time;
}
/**
* Socket for the connection monitoring client connection/disconnection.
*/
private SocketChannel mSocketChannel;
private Integer mLastBatteryLevel = null;
private long mLastBatteryCheckTime = 0;
/** Path to the screen recorder binary on the device. */
private static final String SCREEN_RECORDER_DEVICE_PATH = "/system/bin/screenrecord";
private static final long LS_TIMEOUT_SEC = 2;
/** Flag indicating whether the device has the screen recorder binary. */
private Boolean mHasScreenRecorder;
/** Cached list of hardware characteristics */
private Set<String> mHardwareCharacteristics;
private int mApiLevel;
private String mName;
/**
* Output receiver for "pm install package.apk" command line.
*/
private static final class InstallReceiver extends MultiLineReceiver {
private static final String SUCCESS_OUTPUT = "Success"; //$NON-NLS-1$
private static final Pattern FAILURE_PATTERN = Pattern.compile("Failure\\s+\\[(.*)\\]"); //$NON-NLS-1$
private String mErrorMessage = null;
public InstallReceiver() {
}
@Override
public void processNewLines(String[] lines) {
for (String line : lines) {
if (!line.isEmpty()) {
if (line.startsWith(SUCCESS_OUTPUT)) {
mErrorMessage = null;
} else {
Matcher m = FAILURE_PATTERN.matcher(line);
if (m.matches()) {
mErrorMessage = m.group(1);
} else {
mErrorMessage = "Unknown failure";
}
}
}
}
}
@Override
public boolean isCancelled() {
return false;
}
public String getErrorMessage() {
return mErrorMessage;
}
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getSerialNumber()
*/
@NonNull
@Override
public String getSerialNumber() {
return mSerialNumber;
}
@Override
public String getAvdName() {
return mAvdName;
}
/**
* Sets the name of the AVD
*/
void setAvdName(String avdName) {
if (!isEmulator()) {
throw new IllegalArgumentException(
"Cannot set the AVD name of the device is not an emulator");
}
mAvdName = avdName;
}
@Override
public String getName() {
if (mName != null) {
return mName;
}
if (isOnline()) {
// cache name only if device is online
mName = constructName();
return mName;
} else {
return constructName();
}
}
private String constructName() {
if (isEmulator()) {
String avdName = getAvdName();
if (avdName != null) {
return String.format("%s [%s]", avdName, getSerialNumber());
} else {
return getSerialNumber();
}
} else {
String manufacturer = null;
String model = null;
try {
manufacturer = cleanupStringForDisplay(getProperty(PROP_DEVICE_MANUFACTURER));
model = cleanupStringForDisplay(getProperty(PROP_DEVICE_MODEL));
} catch (Exception e) {
// If there are exceptions thrown while attempting to get these properties,
// we can just use the serial number, so ignore these exceptions.
}
StringBuilder sb = new StringBuilder(20);
if (manufacturer != null) {
sb.append(manufacturer);
sb.append(SEPARATOR);
}
if (model != null) {
sb.append(model);
sb.append(SEPARATOR);
}
sb.append(getSerialNumber());
return sb.toString();
}
}
private String cleanupStringForDisplay(String s) {
if (s == null) {
return null;
}
StringBuilder sb = new StringBuilder(s.length());
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
if (Character.isLetterOrDigit(c)) {
sb.append(Character.toLowerCase(c));
} else {
sb.append('_');
}
}
return sb.toString();
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getState()
*/
@Override
public DeviceState getState() {
return mState;
}
/**
* Changes the state of the device.
*/
void setState(DeviceState state) {
mState = state;
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getProperties()
*/
@Override
public Map<String, String> getProperties() {
return Collections.unmodifiableMap(mPropFetcher.getProperties());
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getPropertyCount()
*/
@Override
public int getPropertyCount() {
return mPropFetcher.getProperties().size();
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getProperty(java.lang.String)
*/
@Override
public String getProperty(String name) {
Future<String> future = mPropFetcher.getProperty(name);
try {
return future.get(GET_PROP_TIMEOUT_MS, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// ignore
} catch (ExecutionException e) {
// ignore
} catch (java.util.concurrent.TimeoutException e) {
// ignore
}
return null;
}
@Override
public boolean arePropertiesSet() {
return mPropFetcher.arePropertiesSet();
}
@Override
public String getPropertyCacheOrSync(String name) throws TimeoutException,
AdbCommandRejectedException, ShellCommandUnresponsiveException, IOException {
Future<String> future = mPropFetcher.getProperty(name);
try {
return future.get();
} catch (InterruptedException e) {
// ignore
} catch (ExecutionException e) {
// ignore
}
return null;
}
@Override
public String getPropertySync(String name) throws TimeoutException,
AdbCommandRejectedException, ShellCommandUnresponsiveException, IOException {
Future<String> future = mPropFetcher.getProperty(name);
try {
return future.get();
} catch (InterruptedException e) {
// ignore
} catch (ExecutionException e) {
// ignore
}
return null;
}
@NonNull
@Override
public Future<String> getSystemProperty(@NonNull String name) {
return mPropFetcher.getProperty(name);
}
@Override
public boolean supportsFeature(@NonNull Feature feature) {
switch (feature) {
case SCREEN_RECORD:
if (getApiLevel() < 19) {
return false;
}
if (mHasScreenRecorder == null) {
mHasScreenRecorder = hasBinary(SCREEN_RECORDER_DEVICE_PATH);
}
return mHasScreenRecorder;
case PROCSTATS:
return getApiLevel() >= 19;
default:
return false;
}
}
// The full list of features can be obtained from /etc/permissions/features*
// However, the smaller set of features we are interested in can be obtained by
// reading the build characteristics property.
@Override
public boolean supportsFeature(@NonNull HardwareFeature feature) {
if (mHardwareCharacteristics == null) {
try {
String characteristics = getProperty(PROP_BUILD_CHARACTERISTICS);
if (characteristics == null) {
return false;
}
mHardwareCharacteristics = Sets.newHashSet(Splitter.on(',').split(characteristics));
} catch (Exception e) {
mHardwareCharacteristics = Collections.emptySet();
}
}
return mHardwareCharacteristics.contains(feature.getCharacteristic());
}
private int getApiLevel() {
if (mApiLevel > 0) {
return mApiLevel;
}
try {
String buildApi = getProperty(PROP_BUILD_API_LEVEL);
mApiLevel = buildApi == null ? -1 : Integer.parseInt(buildApi);
return mApiLevel;
} catch (Exception e) {
return -1;
}
}
private boolean hasBinary(String path) {
CountDownLatch latch = new CountDownLatch(1);
CollectingOutputReceiver receiver = new CollectingOutputReceiver(latch);
try {
executeShellCommand("ls " + path, receiver);
} catch (Exception e) {
return false;
}
try {
latch.await(LS_TIMEOUT_SEC, TimeUnit.SECONDS);
} catch (InterruptedException e) {
return false;
}
String value = receiver.getOutput().trim();
return !value.endsWith("No such file or directory");
}
@Nullable
@Override
public String getMountPoint(@NonNull String name) {
String mount = mMountPoints.get(name);
if (mount == null) {
try {
mount = queryMountPoint(name);
mMountPoints.put(name, mount);
} catch (TimeoutException ignored) {
} catch (AdbCommandRejectedException ignored) {
} catch (ShellCommandUnresponsiveException ignored) {
} catch (IOException ignored) {
}
}
return mount;
}
@Nullable
private String queryMountPoint(@NonNull final String name)
throws TimeoutException, AdbCommandRejectedException, ShellCommandUnresponsiveException,
IOException {
final AtomicReference<String> ref = Atomics.newReference();
executeShellCommand("echo $" + name, new MultiLineReceiver() { //$NON-NLS-1$
@Override
public boolean isCancelled() {
return false;
}
@Override
public void processNewLines(String[] lines) {
for (String line : lines) {
if (!line.isEmpty()) {
// this should be the only one.
ref.set(line);
}
}
}
});
return ref.get();
}
@Override
public String toString() {
return mSerialNumber;
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#isOnline()
*/
@Override
public boolean isOnline() {
return mState == DeviceState.ONLINE;
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#isEmulator()
*/
@Override
public boolean isEmulator() {
return mSerialNumber.matches(RE_EMULATOR_SN);
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#isOffline()
*/
@Override
public boolean isOffline() {
return mState == DeviceState.OFFLINE;
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#isBootLoader()
*/
@Override
public boolean isBootLoader() {
return mState == DeviceState.BOOTLOADER;
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getSyncService()
*/
@Override
public SyncService getSyncService()
throws TimeoutException, AdbCommandRejectedException, IOException {
SyncService syncService = new SyncService(AndroidDebugBridge.getSocketAddress(), this);
if (syncService.openSync()) {
return syncService;
}
return null;
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#getFileListingService()
*/
@Override
public FileListingService getFileListingService() {
return new FileListingService(this);
}
@Override
public RawImage getScreenshot()
throws TimeoutException, AdbCommandRejectedException, IOException {
return getScreenshot(0, TimeUnit.MILLISECONDS);
}
@Override
public RawImage getScreenshot(long timeout, TimeUnit unit)
throws TimeoutException, AdbCommandRejectedException, IOException {
return AdbHelper.getFrameBuffer(AndroidDebugBridge.getSocketAddress(), this, timeout, unit);
}
@Override
public void startScreenRecorder(String remoteFilePath, ScreenRecorderOptions options,
IShellOutputReceiver receiver) throws TimeoutException, AdbCommandRejectedException,
IOException, ShellCommandUnresponsiveException {
executeShellCommand(getScreenRecorderCommand(remoteFilePath, options), receiver, 0, null);
}
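    // For illustration: width=720, height=1280, bitrateMbps=4 and a 30 s time limit in
    // the options would make the method below build the shell command
    //   "screenrecord --size 720x1280 --bit-rate 4000000 --time-limit 30 <remoteFilePath>"
    // (limits above 180 s are clamped, matching screenrecord's own 3-minute maximum).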
@VisibleForTesting(visibility = VisibleForTesting.Visibility.PRIVATE)
static String getScreenRecorderCommand(@NonNull String remoteFilePath,
@NonNull ScreenRecorderOptions options) {
StringBuilder sb = new StringBuilder();
sb.append("screenrecord");
sb.append(' ');
if (options.width > 0 && options.height > 0) {
sb.append("--size ");
sb.append(options.width);
sb.append('x');
sb.append(options.height);
sb.append(' ');
}
if (options.bitrateMbps > 0) {
sb.append("--bit-rate ");
sb.append(options.bitrateMbps * 1000000);
sb.append(' ');
}
if (options.timeLimit > 0) {
sb.append("--time-limit ");
long seconds = TimeUnit.SECONDS.convert(options.timeLimit, options.timeLimitUnits);
if (seconds > 180) {
seconds = 180;
}
sb.append(seconds);
sb.append(' ');
}
sb.append(remoteFilePath);
return sb.toString();
}
@Override
public void executeShellCommand(String command, IShellOutputReceiver receiver)
throws TimeoutException, AdbCommandRejectedException, ShellCommandUnresponsiveException,
IOException {
AdbHelper.executeRemoteCommand(AndroidDebugBridge.getSocketAddress(), command, this,
receiver, DdmPreferences.getTimeOut());
}
@Override
public void executeShellCommand(String command, IShellOutputReceiver receiver,
int maxTimeToOutputResponse)
throws TimeoutException, AdbCommandRejectedException, ShellCommandUnresponsiveException,
IOException {
AdbHelper.executeRemoteCommand(AndroidDebugBridge.getSocketAddress(), command, this,
receiver, maxTimeToOutputResponse);
}
@Override
public void executeShellCommand(String command, IShellOutputReceiver receiver,
long maxTimeToOutputResponse, TimeUnit maxTimeUnits)
throws TimeoutException, AdbCommandRejectedException, ShellCommandUnresponsiveException,
IOException {
AdbHelper.executeRemoteCommand(AndroidDebugBridge.getSocketAddress(), command, this,
receiver, maxTimeToOutputResponse, maxTimeUnits);
}
@Override
public void runEventLogService(LogReceiver receiver)
throws TimeoutException, AdbCommandRejectedException, IOException {
AdbHelper.runEventLogService(AndroidDebugBridge.getSocketAddress(), this, receiver);
}
@Override
public void runLogService(String logname, LogReceiver receiver)
throws TimeoutException, AdbCommandRejectedException, IOException {
AdbHelper.runLogService(AndroidDebugBridge.getSocketAddress(), this, logname, receiver);
}
@Override
public void createForward(int localPort, int remotePort)
throws TimeoutException, AdbCommandRejectedException, IOException {
AdbHelper.createForward(AndroidDebugBridge.getSocketAddress(), this,
String.format("tcp:%d", localPort), //$NON-NLS-1$
String.format("tcp:%d", remotePort)); //$NON-NLS-1$
}
@Override
public void createForward(int localPort, String remoteSocketName,
DeviceUnixSocketNamespace namespace) throws TimeoutException,
AdbCommandRejectedException, IOException {
AdbHelper.createForward(AndroidDebugBridge.getSocketAddress(), this,
String.format("tcp:%d", localPort), //$NON-NLS-1$
String.format("%s:%s", namespace.getType(), remoteSocketName)); //$NON-NLS-1$
}
@Override
public void removeForward(int localPort, int remotePort)
throws TimeoutException, AdbCommandRejectedException, IOException {
AdbHelper.removeForward(AndroidDebugBridge.getSocketAddress(), this,
String.format("tcp:%d", localPort), //$NON-NLS-1$
String.format("tcp:%d", remotePort)); //$NON-NLS-1$
}
@Override
public void removeForward(int localPort, String remoteSocketName,
DeviceUnixSocketNamespace namespace) throws TimeoutException,
AdbCommandRejectedException, IOException {
AdbHelper.removeForward(AndroidDebugBridge.getSocketAddress(), this,
String.format("tcp:%d", localPort), //$NON-NLS-1$
String.format("%s:%s", namespace.getType(), remoteSocketName)); //$NON-NLS-1$
}
Device(DeviceMonitor monitor, String serialNumber, DeviceState deviceState) {
mMonitor = monitor;
mSerialNumber = serialNumber;
mState = deviceState;
}
DeviceMonitor getMonitor() {
return mMonitor;
}
@Override
public boolean hasClients() {
synchronized (mClients) {
return !mClients.isEmpty();
}
}
@Override
public Client[] getClients() {
synchronized (mClients) {
return mClients.toArray(new Client[mClients.size()]);
}
}
@Override
public Client getClient(String applicationName) {
synchronized (mClients) {
for (Client c : mClients) {
if (applicationName.equals(c.getClientData().getClientDescription())) {
return c;
}
}
}
return null;
}
void addClient(Client client) {
synchronized (mClients) {
mClients.add(client);
}
addClientInfo(client);
}
List<Client> getClientList() {
return mClients;
}
void clearClientList() {
synchronized (mClients) {
mClients.clear();
}
clearClientInfo();
}
/**
* Removes a {@link Client} from the list.
* @param client the client to remove.
* @param notify Whether or not to notify the listeners of a change.
*/
void removeClient(Client client, boolean notify) {
mMonitor.addPortToAvailableList(client.getDebuggerListenPort());
synchronized (mClients) {
mClients.remove(client);
}
if (notify) {
mMonitor.getServer().deviceChanged(this, CHANGE_CLIENT_LIST);
}
removeClientInfo(client);
}
/** Sets the socket channel on which a track-jdwp command for this device has been sent. */
void setClientMonitoringSocket(@NonNull SocketChannel socketChannel) {
mSocketChannel = socketChannel;
}
/**
* Returns the channel on which responses to the track-jdwp command will be available if it
* has been set, null otherwise. The channel is set via {@link #setClientMonitoringSocket(SocketChannel)},
* which is usually invoked when the device goes online.
*/
@Nullable
SocketChannel getClientMonitoringSocket() {
return mSocketChannel;
}
void update(int changeMask) {
mMonitor.getServer().deviceChanged(this, changeMask);
}
void update(Client client, int changeMask) {
mMonitor.getServer().clientChanged(client, changeMask);
updateClientInfo(client, changeMask);
}
void setMountingPoint(String name, String value) {
mMountPoints.put(name, value);
}
private void addClientInfo(Client client) {
ClientData cd = client.getClientData();
setClientInfo(cd.getPid(), cd.getClientDescription());
}
private void updateClientInfo(Client client, int changeMask) {
if ((changeMask & Client.CHANGE_NAME) == Client.CHANGE_NAME) {
addClientInfo(client);
}
}
private void removeClientInfo(Client client) {
int pid = client.getClientData().getPid();
mClientInfo.remove(pid);
}
private void clearClientInfo() {
mClientInfo.clear();
}
private void setClientInfo(int pid, String pkgName) {
if (pkgName == null) {
pkgName = UNKNOWN_PACKAGE;
}
mClientInfo.put(pid, pkgName);
}
@Override
public String getClientName(int pid) {
String pkgName = mClientInfo.get(pid);
return pkgName == null ? UNKNOWN_PACKAGE : pkgName;
}
@Override
public void pushFile(String local, String remote)
throws IOException, AdbCommandRejectedException, TimeoutException, SyncException {
SyncService sync = null;
try {
String targetFileName = getFileName(local);
Log.d(targetFileName, String.format("Uploading %1$s onto device '%2$s'",
targetFileName, getSerialNumber()));
sync = getSyncService();
if (sync != null) {
String message = String.format("Uploading file onto device '%1$s'",
getSerialNumber());
Log.d(LOG_TAG, message);
sync.pushFile(local, remote, SyncService.getNullProgressMonitor());
} else {
throw new IOException("Unable to open sync connection!");
}
} catch (TimeoutException e) {
Log.e(LOG_TAG, "Error during Sync: timeout.");
throw e;
} catch (SyncException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw e;
} catch (IOException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw e;
} finally {
if (sync != null) {
sync.close();
}
}
}
@Override
public void pullFile(String remote, String local)
throws IOException, AdbCommandRejectedException, TimeoutException, SyncException {
SyncService sync = null;
try {
String targetFileName = getFileName(remote);
Log.d(targetFileName, String.format("Downloading %1$s from device '%2$s'",
targetFileName, getSerialNumber()));
sync = getSyncService();
if (sync != null) {
String message = String.format("Downloading file from device '%1$s'",
getSerialNumber());
Log.d(LOG_TAG, message);
sync.pullFile(remote, local, SyncService.getNullProgressMonitor());
} else {
throw new IOException("Unable to open sync connection!");
}
} catch (TimeoutException e) {
Log.e(LOG_TAG, "Error during Sync: timeout.");
throw e;
} catch (SyncException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw e;
} catch (IOException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw e;
} finally {
if (sync != null) {
sync.close();
}
}
}
@Override
public String installPackage(String packageFilePath, boolean reinstall,
String... extraArgs)
throws InstallException {
try {
String remoteFilePath = syncPackageToDevice(packageFilePath);
String result = installRemotePackage(remoteFilePath, reinstall, extraArgs);
removeRemotePackage(remoteFilePath);
return result;
} catch (IOException e) {
throw new InstallException(e);
} catch (AdbCommandRejectedException e) {
throw new InstallException(e);
} catch (TimeoutException e) {
throw new InstallException(e);
} catch (SyncException e) {
throw new InstallException(e);
}
}
@Override
public void installPackages(List<String> apkFilePaths, int timeOutInMs, boolean reinstall,
String... extraArgs) throws InstallException {
assert(!apkFilePaths.isEmpty());
if (getApiLevel() < 21) {
Log.w("Internal error : installPackages invoked with device < 21 for %s",
Joiner.on(",").join(apkFilePaths));
if (apkFilePaths.size() == 1) {
installPackage(apkFilePaths.get(0), reinstall, extraArgs);
return;
}
Log.e("Internal error : installPackages invoked with device < 21 for multiple APK : %s",
Joiner.on(",").join(apkFilePaths));
throw new InstallException(
"Internal error : installPackages invoked with device < 21 for multiple APK : "
+ Joiner.on(",").join(apkFilePaths));
}
String mainPackageFilePath = apkFilePaths.get(0);
Log.d(mainPackageFilePath,
String.format("Uploading main %1$s and %2$s split APKs onto device '%3$s'",
mainPackageFilePath, Joiner.on(',').join(apkFilePaths),
getSerialNumber()));
try {
            // create an installation session.
List<String> extraArgsList = extraArgs != null
? ImmutableList.copyOf(extraArgs)
: ImmutableList.<String>of();
String sessionId = createMultiInstallSession(apkFilePaths, extraArgsList, reinstall);
if (sessionId == null) {
Log.d(mainPackageFilePath, "Failed to establish session, quit installation");
throw new InstallException("Failed to establish session");
}
Log.d(mainPackageFilePath, String.format("Established session id=%1$s", sessionId));
// now upload each APK in turn.
int index = 0;
boolean allUploadSucceeded = true;
while (allUploadSucceeded && index < apkFilePaths.size()) {
allUploadSucceeded = uploadAPK(sessionId, apkFilePaths.get(index), index++);
}
            // if all files were uploaded successfully, commit; otherwise abandon the installation.
String command = allUploadSucceeded
? "pm install-commit " + sessionId
: "pm install-abandon " + sessionId;
InstallReceiver receiver = new InstallReceiver();
executeShellCommand(command, receiver, timeOutInMs, TimeUnit.MILLISECONDS);
String errorMessage = receiver.getErrorMessage();
if (errorMessage != null) {
String message = String.format("Failed to finalize session : %1$s", errorMessage);
Log.e(mainPackageFilePath, message);
throw new InstallException(message);
}
            // in case not all files were uploaded and we abandoned the install, make sure to
            // notify callers.
if (!allUploadSucceeded) {
throw new InstallException("Unable to upload some APKs");
}
} catch (TimeoutException e) {
Log.e(LOG_TAG, "Error during Sync: timeout.");
throw new InstallException(e);
} catch (IOException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw new InstallException(e);
} catch (AdbCommandRejectedException e) {
throw new InstallException(e);
} catch (ShellCommandUnresponsiveException e) {
Log.e(LOG_TAG, String.format("Error during shell execution: %1$s", e.getMessage()));
throw new InstallException(e);
}
}
/**
* Implementation of {@link com.android.ddmlib.MultiLineReceiver} that can receive a
* Success message from ADB followed by a session ID.
*/
private static class MultiInstallReceiver extends MultiLineReceiver {
private static final Pattern successPattern = Pattern.compile("Success: .*\\[(\\d*)\\]");
@Nullable String sessionId = null;
@Override
public boolean isCancelled() {
return false;
}
@Override
public void processNewLines(String[] lines) {
for (String line : lines) {
Matcher matcher = successPattern.matcher(line);
if (matcher.matches()) {
sessionId = matcher.group(1);
}
}
}
@Nullable
public String getSessionId() {
return sessionId;
}
}
@Nullable
private String createMultiInstallSession(List<String> apkFileNames,
@NonNull Collection<String> extraArgs, boolean reinstall)
throws TimeoutException, AdbCommandRejectedException, ShellCommandUnresponsiveException,
IOException {
List<File> apkFiles = Lists.transform(apkFileNames, new Function<String, File>() {
@Override
public File apply(String input) {
return new File(input);
}
});
long totalFileSize = 0L;
for (File apkFile : apkFiles) {
if (apkFile.exists() && apkFile.isFile()) {
totalFileSize += apkFile.length();
} else {
throw new IllegalArgumentException(apkFile.getAbsolutePath() + " is not a file");
}
}
StringBuilder parameters = new StringBuilder();
if (reinstall) {
parameters.append(("-r "));
}
parameters.append(Joiner.on(' ').join(extraArgs));
MultiInstallReceiver receiver = new MultiInstallReceiver();
String cmd = String.format("pm install-create %1$s -S %2$d",
parameters.toString(),
totalFileSize);
executeShellCommand(cmd, receiver, DdmPreferences.getTimeOut());
return receiver.getSessionId();
}
private static final CharMatcher UNSAFE_PM_INSTALL_SESSION_SPLIT_NAME_CHARS =
CharMatcher.inRange('a','z').or(CharMatcher.inRange('A','Z'))
.or(CharMatcher.anyOf("_-")).negate();
private boolean uploadAPK(final String sessionId, String apkFilePath, int uniqueId) {
Log.d(sessionId, String.format("Uploading APK %1$s ", apkFilePath));
File fileToUpload = new File(apkFilePath);
if (!fileToUpload.exists()) {
Log.e(sessionId, String.format("File not found: %1$s", apkFilePath));
return false;
}
if (fileToUpload.isDirectory()) {
Log.e(sessionId, String.format("Directory upload not supported: %1$s", apkFilePath));
return false;
}
String baseName = fileToUpload.getName().lastIndexOf('.') != -1
? fileToUpload.getName().substring(0, fileToUpload.getName().lastIndexOf('.'))
: fileToUpload.getName();
baseName = UNSAFE_PM_INSTALL_SESSION_SPLIT_NAME_CHARS.replaceFrom(baseName, '_');
String command = String.format("pm install-write -S %d %s %d_%s -",
fileToUpload.length(), sessionId, uniqueId, baseName);
Log.d(sessionId, String.format("Executing : %1$s", command));
InputStream inputStream = null;
try {
inputStream = new BufferedInputStream(new FileInputStream(fileToUpload));
InstallReceiver receiver = new InstallReceiver();
AdbHelper.executeRemoteCommand(AndroidDebugBridge.getSocketAddress(),
AdbHelper.AdbService.EXEC, command, this,
receiver, DdmPreferences.getTimeOut(), TimeUnit.MILLISECONDS, inputStream);
if (receiver.getErrorMessage() != null) {
Log.e(sessionId, String.format("Error while uploading %1$s : %2$s", fileToUpload.getName(),
receiver.getErrorMessage()));
} else {
Log.d(sessionId, String.format("Successfully uploaded %1$s", fileToUpload.getName()));
}
return receiver.getErrorMessage() == null;
} catch (Exception e) {
Log.e(sessionId, e);
return false;
} finally {
if (inputStream != null) {
try {
inputStream.close();
} catch (IOException e) {
Log.e(sessionId, e);
}
}
}
}
@Override
public String syncPackageToDevice(String localFilePath)
throws IOException, AdbCommandRejectedException, TimeoutException, SyncException {
SyncService sync = null;
try {
String packageFileName = getFileName(localFilePath);
String remoteFilePath = String.format("/data/local/tmp/%1$s", packageFileName); //$NON-NLS-1$
Log.d(packageFileName, String.format("Uploading %1$s onto device '%2$s'",
packageFileName, getSerialNumber()));
sync = getSyncService();
if (sync != null) {
String message = String.format("Uploading file onto device '%1$s'",
getSerialNumber());
Log.d(LOG_TAG, message);
sync.pushFile(localFilePath, remoteFilePath, SyncService.getNullProgressMonitor());
} else {
throw new IOException("Unable to open sync connection!");
}
return remoteFilePath;
} catch (TimeoutException e) {
Log.e(LOG_TAG, "Error during Sync: timeout.");
throw e;
} catch (SyncException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw e;
} catch (IOException e) {
Log.e(LOG_TAG, String.format("Error during Sync: %1$s", e.getMessage()));
throw e;
} finally {
if (sync != null) {
sync.close();
}
}
}
/**
* Helper method to retrieve the file name given a local file path
* @param filePath full directory path to file
* @return {@link String} file name
*/
private static String getFileName(String filePath) {
return new File(filePath).getName();
}
@Override
public String installRemotePackage(String remoteFilePath, boolean reinstall,
String... extraArgs) throws InstallException {
try {
InstallReceiver receiver = new InstallReceiver();
StringBuilder optionString = new StringBuilder();
if (reinstall) {
optionString.append("-r ");
}
if (extraArgs != null) {
optionString.append(Joiner.on(' ').join(extraArgs));
}
String cmd = String.format("pm install %1$s \"%2$s\"", optionString.toString(),
remoteFilePath);
executeShellCommand(cmd, receiver, INSTALL_TIMEOUT_MINUTES, TimeUnit.MINUTES);
return receiver.getErrorMessage();
} catch (TimeoutException e) {
throw new InstallException(e);
} catch (AdbCommandRejectedException e) {
throw new InstallException(e);
} catch (ShellCommandUnresponsiveException e) {
throw new InstallException(e);
} catch (IOException e) {
throw new InstallException(e);
}
}
@Override
public void removeRemotePackage(String remoteFilePath) throws InstallException {
try {
executeShellCommand(String.format("rm \"%1$s\"", remoteFilePath),
new NullOutputReceiver(), INSTALL_TIMEOUT_MINUTES, TimeUnit.MINUTES);
} catch (IOException e) {
throw new InstallException(e);
} catch (TimeoutException e) {
throw new InstallException(e);
} catch (AdbCommandRejectedException e) {
throw new InstallException(e);
} catch (ShellCommandUnresponsiveException e) {
throw new InstallException(e);
}
}
@Override
public String uninstallPackage(String packageName) throws InstallException {
try {
InstallReceiver receiver = new InstallReceiver();
executeShellCommand("pm uninstall " + packageName, receiver, INSTALL_TIMEOUT_MINUTES,
TimeUnit.MINUTES);
return receiver.getErrorMessage();
} catch (TimeoutException e) {
throw new InstallException(e);
} catch (AdbCommandRejectedException e) {
throw new InstallException(e);
} catch (ShellCommandUnresponsiveException e) {
throw new InstallException(e);
} catch (IOException e) {
throw new InstallException(e);
}
}
/*
* (non-Javadoc)
* @see com.android.ddmlib.IDevice#reboot()
*/
@Override
public void reboot(String into)
throws TimeoutException, AdbCommandRejectedException, IOException {
AdbHelper.reboot(into, AndroidDebugBridge.getSocketAddress(), this);
}
@Override
public Integer getBatteryLevel() throws TimeoutException, AdbCommandRejectedException,
IOException, ShellCommandUnresponsiveException {
// use default of 5 minutes
return getBatteryLevel(5 * 60 * 1000);
}
@Override
public Integer getBatteryLevel(long freshnessMs) throws TimeoutException,
AdbCommandRejectedException, IOException, ShellCommandUnresponsiveException {
Future<Integer> futureBattery = getBattery(freshnessMs, TimeUnit.MILLISECONDS);
try {
return futureBattery.get();
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
return null;
}
}
@NonNull
@Override
public Future<Integer> getBattery() {
return getBattery(5, TimeUnit.MINUTES);
}
@NonNull
@Override
public Future<Integer> getBattery(long freshnessTime, @NonNull TimeUnit timeUnit) {
return mBatteryFetcher.getBattery(freshnessTime, timeUnit);
}
@NonNull
@Override
public List<String> getAbis() {
/* Try abiList (implemented in L onwards) otherwise fall back to abi and abi2. */
String abiList = getProperty(IDevice.PROP_DEVICE_CPU_ABI_LIST);
if(abiList != null) {
return Lists.newArrayList(abiList.split(","));
} else {
List<String> abis = Lists.newArrayListWithExpectedSize(2);
String abi = getProperty(IDevice.PROP_DEVICE_CPU_ABI);
if (abi != null) {
abis.add(abi);
}
abi = getProperty(IDevice.PROP_DEVICE_CPU_ABI2);
if (abi != null) {
abis.add(abi);
}
return abis;
}
}
@Override
public int getDensity() {
String densityValue = getProperty(IDevice.PROP_DEVICE_DENSITY);
if (densityValue != null) {
try {
return Integer.parseInt(densityValue);
} catch (NumberFormatException e) {
return -1;
}
}
return -1;
}
@Override
public String getLanguage() {
return getProperties().get(IDevice.PROP_DEVICE_LANGUAGE);
}
@Override
public String getRegion() {
return getProperty(IDevice.PROP_DEVICE_REGION);
}
}
|
[
"\"ADB_INSTALL_TIMEOUT\""
] |
[] |
[
"ADB_INSTALL_TIMEOUT"
] |
[]
|
["ADB_INSTALL_TIMEOUT"]
|
java
| 1 | 0 | |
examples/senders/senders.go
|
package main
import (
"fmt"
"github.com/sendgrid/sendgrid-go"
"log"
"os"
)
// CreateaSenderIdentity : Create a Sender Identity
// POST /senders
func CreateaSenderIdentity() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/senders", host)
request.Method = "POST"
request.Body = []byte(` {
"address": "123 Elm St.",
"address_2": "Apt. 456",
"city": "Denver",
"country": "United States",
"from": {
"email": "[email protected]",
"name": "Example INC"
},
"nickname": "My Sender ID",
"reply_to": {
"email": "[email protected]",
"name": "Example INC"
},
"state": "Colorado",
"zip": "80202"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// GetallSenderIdentities : Get all Sender Identities
// GET /senders
func GetallSenderIdentities() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/senders", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// UpdateaSenderIdentity : Update a Sender Identity
// PATCH /senders/{sender_id}
func UpdateaSenderIdentity() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host)
request.Method = "PATCH"
request.Body = []byte(` {
"address": "123 Elm St.",
"address_2": "Apt. 456",
"city": "Denver",
"country": "United States",
"from": {
"email": "[email protected]",
"name": "Example INC"
},
"nickname": "My Sender ID",
"reply_to": {
"email": "[email protected]",
"name": "Example INC"
},
"state": "Colorado",
"zip": "80202"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// ViewaSenderIdentity : View a Sender Identity
// GET /senders/{sender_id}
func ViewaSenderIdentity() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// DeleteaSenderIdentity : Delete a Sender Identity
// DELETE /senders/{sender_id}
func DeleteaSenderIdentity() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// ResendSenderIdentityVerification : Resend Sender Identity Verification
// POST /senders/{sender_id}/resend_verification
func ResendSenderIdentityVerification() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}/resend_verification", host)
request.Method = "POST"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
func main() {
// add your function calls here
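	// For example, uncomment one of the calls below (each reads the API key from the
	// YOUR_SENDGRID_APIKEY environment variable):
	// GetallSenderIdentities()
	// CreateaSenderIdentity()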
}
|
[
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\""
] |
[] |
[
"YOUR_SENDGRID_APIKEY"
] |
[]
|
["YOUR_SENDGRID_APIKEY"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"log"
"net/http"
"os"
"time"
"github.com/google/uuid"
)
type Result struct {
ServerTime time.Time
ServerTZ string
ClientTime time.Time
ClientTZ string
Pass bool
}
func main() {
// re-used vars
reqIdHeaderKey := http.CanonicalHeaderKey("x-request-id")
client := &http.Client{}
	////////// TEST 1 - Ensure I can't talk directly to the time-server //////////////////////
serverUuidWithHyphen := uuid.New()
serverUrl := os.Getenv("SERVERURL")
// setup request to proxy server
req, err := http.NewRequest("GET", serverUrl, nil)
if err != nil {
log.Print("Failed to create new request.")
log.Fatal(err.Error())
}
// add requestid
req.Header.Set(reqIdHeaderKey, serverUuidWithHyphen.String())
// send request to proxy
resp, err := client.Do(req)
if err != nil {
		// this is for actual transport failures, not non-2xx status codes
log.Print("Failed to make request to server")
log.Fatal(err.Error())
}
if resp.StatusCode == http.StatusOK {
		// this is a problem, as we shouldn't be able to access the server directly
log.Fatal("Can access the time-server, which should violate allow-nothing policy")
//log.Fatal(err.Error())
}
// defer resp.Body.Close()
// var proxyResult Result
// json.NewDecoder(resp.Body).Decode(&proxyResult)
/////////////////////////////////////////////////////////////////////////////////////////
	////////// TEST 2 - Ensure I can talk to the proxy server ///////////////////////////////
// Create request ID for this test
uuidWithHyphen := uuid.New()
proxyUrl := os.Getenv("PROXYURL")
// setup request to proxy server
req, err = http.NewRequest("GET", proxyUrl, nil)
if err != nil {
log.Print("Failed to create new request.")
log.Fatal(err.Error())
}
// add requestid
req.Header.Set(reqIdHeaderKey, uuidWithHyphen.String())
// send request to proxy
resp, err = client.Do(req)
if err != nil {
log.Print("Failed to make request to proxy")
log.Fatal(err.Error())
}
defer resp.Body.Close()
var proxyResult Result
json.NewDecoder(resp.Body).Decode(&proxyResult)
// check that the server returned the correct request id
returnVal, proxyOk := resp.Header[reqIdHeaderKey]
if !proxyOk {
log.Fatal("proxy didnt return x-request-id")
//log.Fatal(err.Error())
}
if returnVal[0] != uuidWithHyphen.String() {
log.Fatal("proxy returned different x-request-id")
}
/////////////////////////////////////////////////////////////////////////////////////////
log.Print("Test complete.")
os.Exit(0)
}
|
[
"\"SERVERURL\"",
"\"PROXYURL\""
] |
[] |
[
"SERVERURL",
"PROXYURL"
] |
[]
|
["SERVERURL", "PROXYURL"]
|
go
| 2 | 0 | |
compiler/tests/04_pand4_test.py
|
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys, os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
import debug
class pand4_test(openram_test):
def runTest(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
global verify
import verify
import pand4
debug.info(2, "Testing pand4 gate 4x")
a = pand4.pand4(name="pand4x4", size=4)
self.local_check(a)
globals.end_openram()
# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
|
[] |
[] |
[
"OPENRAM_HOME"
] |
[]
|
["OPENRAM_HOME"]
|
python
| 1 | 0 | |
Projects/CBVs/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CBVs.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/accelerate/commands/launch.py
|
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import os
import subprocess
import sys
import warnings
from ast import literal_eval
from pathlib import Path
from typing import Dict, List
from accelerate.commands.config import default_config_file, load_config_from_file
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.state import ComputeEnvironment, DistributedType
from accelerate.utils import PrepareForLaunch, is_sagemaker_available
def launch_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("launch")
else:
parser = argparse.ArgumentParser("Accelerate launch command")
parser.add_argument(
"--config_file", default=None, help="The config file to use for the default values in the launching script."
)
parser.add_argument(
"--multi_gpu",
default=False,
action="store_true",
help="Whether or not this should launch a distributed GPU training.",
)
parser.add_argument(
"--use_deepspeed",
default=False,
action="store_true",
help="Whether to use deepspeed.",
)
parser.add_argument(
"--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
)
parser.add_argument(
"--mixed_precision",
default="no",
type=str,
choices=["no", "fp16", "bf16"],
help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
)
parser.add_argument(
"--fp16", default=False, action="store_true", help="Whether or not to use mixed precision training."
)
parser.add_argument(
"--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
)
parser.add_argument(
"--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
)
parser.add_argument(
"--num_machines", type=int, default=None, help="The total number of machines used in this training."
)
parser.add_argument(
"--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
)
parser.add_argument("--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0.")
parser.add_argument(
"--main_process_port",
type=int,
default=None,
help="The port to use to communicate with the machine of rank 0.",
)
parser.add_argument(
"--main_training_function",
type=str,
default=None,
help="The name of the main function to be executed in your script (only for TPU training).",
)
parser.add_argument(
"-m",
"--module",
action="store_true",
help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
)
parser.add_argument(
"--no_python",
action="store_true",
help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
)
parser.add_argument(
"--aws_access_key_id",
type=str,
default=None,
help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job",
)
parser.add_argument(
"--aws_secret_access_key",
type=str,
default=None,
help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job",
)
parser.add_argument(
"training_script",
type=str,
help=(
"The full path to the script to be launched in parallel, followed by all the arguments for the training "
"script."
),
)
parser.add_argument(
"--zero_stage",
default=None,
type=int,
help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).",
)
parser.add_argument(
"--offload_optimizer_device",
default=None,
type=str,
help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).",
)
parser.add_argument(
"--gradient_accumulation_steps",
default=None,
type=int,
help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).",
)
# Other arguments of the training scripts
parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
if subparsers is not None:
parser.set_defaults(func=launch_command)
return parser
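# Illustrative note (not part of the original module): the parser above accepts
# invocations such as the hypothetical command below, where everything after the
# training script is forwarded untouched via `training_script_args`:
#
#   accelerate launch --multi_gpu --num_processes 2 train.py --epochs 3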
def simple_launcher(args):
cmd = []
if args.no_python and args.module:
raise ValueError("--module and --no_python cannot be used together")
if not args.no_python:
cmd.append(sys.executable)
if args.module:
cmd.append("-m")
cmd.append(args.training_script)
cmd.extend(args.training_script_args)
current_env = os.environ.copy()
current_env["USE_CPU"] = str(args.cpu)
mixed_precision = args.mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16"]:
raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.")
if args.fp16:
warnings.warn('--fp16 flag is deprecated. Use "--mixed_precision fp16" instead.', DeprecationWarning)
mixed_precision = "fp16"
current_env["MIXED_PRECISION"] = str(mixed_precision)
process = subprocess.Popen(cmd, env=current_env)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
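# Rough sketch of what simple_launcher builds (assumed example, not from the
# original source): for `accelerate launch train.py --epochs 3` it would run
# roughly [sys.executable, "train.py", "--epochs", "3"] with USE_CPU and
# MIXED_PRECISION exported into the child process environment.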
def multi_gpu_launcher(args):
cmd = [sys.executable, "-m", "torch.distributed.launch", "--use_env"]
if args.num_machines > 1:
cmd.extend(
[
"--nproc_per_node",
str(args.num_processes // args.num_machines),
"--nnodes",
str(args.num_machines),
"--node_rank",
str(args.machine_rank),
"--master_addr",
args.main_process_ip,
"--master_port",
str(args.main_process_port),
]
)
else:
cmd.extend(["--nproc_per_node", str(args.num_processes)])
if args.main_process_port is not None:
cmd.extend(["--master_port", str(args.main_process_port)])
if args.module and args.no_python:
raise ValueError("--module and --no_python cannot be used together")
elif args.module:
cmd.append("--module")
elif args.no_python:
cmd.append("--no_python")
cmd.append(args.training_script)
cmd.extend(args.training_script_args)
current_env = os.environ.copy()
mixed_precision = args.mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16"]:
raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.")
if args.fp16:
warnings.warn('--fp16 flag is deprecated. Use "--mixed_precision fp16" instead.', DeprecationWarning)
mixed_precision = "fp16"
current_env["MIXED_PRECISION"] = str(mixed_precision)
process = subprocess.Popen(cmd, env=current_env)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
def deepspeed_launcher(args):
cmd = ["deepspeed"]
if args.num_machines > 1:
cmd.extend(
[
"--num_gpus",
str(args.num_processes // args.num_machines),
"--num_nodes",
str(args.num_machines),
"--node_rank",
str(args.machine_rank),
"--master_addr",
args.main_process_ip,
"--master_port",
str(args.main_process_port),
]
)
else:
cmd.extend(["--num_gpus", str(args.num_processes)])
if args.module and args.no_python:
raise ValueError("--module and --no_python cannot be used together")
elif args.module:
cmd.append("--module")
elif args.no_python:
cmd.append("--no_python")
cmd.append(args.training_script)
cmd.extend(args.training_script_args)
current_env = os.environ.copy()
mixed_precision = args.mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16"]:
raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.")
if args.fp16:
warnings.warn('--fp16 flag is deprecated. Use "--mixed_precision fp16" instead.', DeprecationWarning)
mixed_precision = "fp16"
current_env["MIXED_PRECISION"] = str(mixed_precision)
current_env["USE_DEEPSPEED"] = "true"
current_env["DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage)
current_env["GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps)
current_env["DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device)
process = subprocess.Popen(cmd, env=current_env)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
def tpu_launcher(args):
import torch_xla.distributed.xla_multiprocessing as xmp
if args.no_python:
raise ValueError("--no_python cannot be used with TPU launcher")
if args.module:
mod_name = args.training_script
else:
# Import training_script as a module
script_path = Path(args.training_script)
sys.path.append(str(script_path.parent.resolve()))
mod_name = script_path.stem
mod = importlib.import_module(mod_name)
if not hasattr(mod, args.main_training_function):
raise ValueError(
f"Your training script should have a function named {args.main_training_function}, or you should pass a "
"different value to `--main_training_function`."
)
# Patch sys.argv
sys.argv = [mod.__file__] + args.training_script_args
main_function = getattr(mod, args.main_training_function)
xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
if len(nargs) == 0:
return {}
# helper function to infer type for argsparser
def _infer_type(s):
try:
s = float(s)
if s // 1 == s:
return int(s)
return s
except ValueError:
return s
parser = argparse.ArgumentParser()
_, unknown = parser.parse_known_args(nargs)
for index, argument in enumerate(unknown):
if argument.startswith(("-", "--")):
action = None
if index + 1 < len(unknown): # checks if next index would be in list
if unknown[index + 1].startswith(("-", "--")): # checks if next element is a key
# raise an error if element is store_true or store_false
raise ValueError(
"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
)
else: # raise an error if last element is store_true or store_false
raise ValueError(
"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
)
# adds argument to parser based on action_store true
if action is None:
parser.add_argument(argument, type=_infer_type)
else:
parser.add_argument(argument, action=action)
return {
key: (literal_eval(value) if value == "True" or value == "False" else value)
for key, value in parser.parse_args(nargs).__dict__.items()
}
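# Illustrative sketch (assumed example): a trailing argument list such as
# ["--epochs", "3", "--learning_rate", "0.1", "--run_name", "test"] would be
# converted by _convert_nargs_to_dict into {"epochs": 3, "learning_rate": 0.1,
# "run_name": "test"}, the hyperparameter mapping handed to SageMaker below.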
def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
if not is_sagemaker_available():
raise ImportError(
"Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
)
if args.module or args.no_python:
raise ValueError(
"SageMaker requires a python training script file and cannot be used with --module or --no_python"
)
from sagemaker.huggingface import HuggingFace
# configure environment
print("Configuring Amazon SageMaker environment")
os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region
# configure credentials
if sagemaker_config.profile is not None:
os.environ["AWS_PROFILE"] = sagemaker_config.profile
elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None:
os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id
os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key
else:
raise EnvironmentError(
"You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile"
)
# extract needed arguments
source_dir = os.path.dirname(args.training_script)
if not source_dir: # checks if string is empty
source_dir = "."
entry_point = os.path.basename(args.training_script)
if not entry_point.endswith(".py"):
raise ValueError(f'Your training script should be a python script and not "{entry_point}"')
print("Converting Arguments to Hyperparameters")
hyperparameters = _convert_nargs_to_dict(args.training_script_args)
mixed_precision = args.mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16"]:
raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.")
if args.fp16:
warnings.warn('--fp16 flag is deprecated. Use "--mixed_precision fp16" instead.', DeprecationWarning)
mixed_precision = "fp16"
# Environment variables to be set for use during training job
environment = {"MIXED_PRECISION": str(mixed_precision)}
# configure distribution set up
distribution = None # TODO: not yet implemented
# configure session
print("Creating Estimator")
huggingface_estimator = HuggingFace(
entry_point=entry_point,
source_dir=source_dir,
role=sagemaker_config.iam_role_name,
transformers_version="4.4",
pytorch_version="1.6",
py_version="py36",
base_job_name=sagemaker_config.base_job_name,
instance_count=sagemaker_config.num_machines,
instance_type=sagemaker_config.ec2_instance_type,
debugger_hook_config=False,
distribution=distribution,
hyperparameters=hyperparameters,
environment=environment,
)
huggingface_estimator.fit()
print(f"You can find your model data at: {huggingface_estimator.model_data}")
def launch_command(args):
# Sanity checks
if sum([args.multi_gpu, args.tpu, args.use_deepspeed]) > 1:
raise ValueError("You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`.")
defaults = None
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
defaults = load_config_from_file(args.config_file)
if not args.multi_gpu and not args.tpu and not args.use_deepspeed:
args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU
args.tpu = defaults.distributed_type == DistributedType.TPU
if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
# Update args with the defaults
for name, attr in defaults.__dict__.items():
if isinstance(attr, dict):
for k in defaults.deepspeed_config:
if getattr(args, k) is None:
setattr(args, k, defaults.deepspeed_config[k])
continue
# Those args are handled separately
if (
name not in ["compute_environment", "fp16", "mixed_precision", "distributed_type"]
and getattr(args, name, None) is None
):
setattr(args, name, attr)
if not args.mixed_precision:
if args.fp16:
args.mixed_precision = "fp16"
else:
args.mixed_precision = defaults.mixed_precision
else:
if args.num_processes is None:
args.num_processes = 1
# Use the proper launcher
if args.use_deepspeed and not args.cpu:
deepspeed_launcher(args)
elif args.multi_gpu and not args.cpu:
multi_gpu_launcher(args)
elif args.tpu and not args.cpu:
tpu_launcher(args)
elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
sagemaker_launcher(defaults, args)
else:
simple_launcher(args)
def main():
parser = launch_command_parser()
args = parser.parse_args()
launch_command(args)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"AWS_PROFILE",
"AWS_DEFAULT_REGION",
"AWS_SECRET_ACCESS_KEY",
"AWS_ACCESS_KEY_ID"
] |
[]
|
["AWS_PROFILE", "AWS_DEFAULT_REGION", "AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID"]
|
python
| 4 | 0 | |
lib/pydotplus/graphviz.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Carlos Jenkins <[email protected]>
# Copyright (c) 2014 Lance Hepler
# Copyright (c) 2004-2011 Ero Carrera <[email protected]>
# Copyright (c) 2004-2007 Michael Krause <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Graphviz's dot language Python interface.
This module provides a full interface to create, handle, modify
and process graphs in Graphviz's dot language.
"""
from __future__ import division, print_function
import os
import re
import subprocess
import sys
import tempfile
import copy
from operator import itemgetter
from . import parser
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
basestring = str
long = int
unicode = str
else:
NULL_SEP = ''
GRAPH_ATTRIBUTES = set([
'Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
# for subgraphs
'rank'
])
EDGE_ATTRIBUTES = set([
'URL', 'arrowhead', 'arrowsize', 'arrowtail',
'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
'rank'
])
NODE_ATTRIBUTES = set([
'URL', 'color', 'colorscheme', 'comment',
'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
'target', 'tooltip', 'vertices', 'width', 'z',
# The following are attributes dot2tex
'texlbl', 'texmode'
])
CLUSTER_ATTRIBUTES = set([
'K', 'URL', 'bgcolor', 'color', 'colorscheme',
'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip'
])
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string."""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def get_fobj(fname, mode='w+'):
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object.
If *fname*
is a file object, then we do nothing and ignore the specified *mode*
parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence, created
the file object already and that subsequent operations should not
close it.
"""
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = False
return fobj, close
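# Usage sketch (assumed example): callers typically do
#   fobj, close = get_fobj('graph.dot', 'w')
#   try:
#       fobj.write(data)
#   finally:
#       if close:
#           fobj.close()
# so that paths are opened and closed here, while caller-supplied file
# objects and descriptors are left for the caller to close.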
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/414283
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError("A frozendict cannot be modified.")
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k, v in arg.items():
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append(frozendict(elm))
else:
v_.append(elm)
arg[k] = tuple(v_)
args_.append(arg)
else:
args_.append(arg)
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.items())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
dot_keywords = ['graph', 'subgraph', 'digraph', 'node', 'edge', 'strict']
id_re_alpha_nums = re.compile('^[_a-zA-Z][a-zA-Z0-9_,]*$', re.UNICODE)
id_re_alpha_nums_with_ports = re.compile(
'^[_a-zA-Z][a-zA-Z0-9_,:\"]*[a-zA-Z0-9_,\"]+$', re.UNICODE
)
id_re_num = re.compile('^[0-9,]+$', re.UNICODE)
id_re_with_port = re.compile('^([^:]*):([^:]*)$', re.UNICODE)
id_re_dbl_quoted = re.compile('^\".*\"$', re.S | re.UNICODE)
id_re_html = re.compile('^<.*>$', re.S | re.UNICODE)
def needs_quotes(s):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in dot_keywords:
return False
chars = [ord(c) for c in s if ord(c) > 0x7f or ord(c) == 0]
if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s):
return True
for test_re in [
id_re_alpha_nums, id_re_num, id_re_dbl_quoted,
id_re_html, id_re_alpha_nums_with_ports]:
if test_re.match(s):
return False
m = id_re_with_port.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True
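# Illustrative examples (assumed, not from the original source):
#   needs_quotes('node1')  -> False  (plain dot ID)
#   needs_quotes('node 1') -> True   (space forces quoting)
#   needs_quotes('graph')  -> False  (keyword; caller must quote manually)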
def quote_if_necessary(s):
# Older versions of graphviz throw a syntax error for empty values without
# quotes, e.g. [label=]
if s == '':
return '""'
if isinstance(s, bool):
if s is True:
return 'True'
return 'False'
if not isinstance(s, basestring):
return s
if not s:
return s
if needs_quotes(s):
replace = {'"': r'\"', "\n": r'\n', "\r": r'\r'}
for (a, b) in replace.items():
s = s.replace(a, b)
return '"' + s + '"'
return s
def graph_from_dot_data(data):
"""Load graph as defined by data in DOT format.
The data is assumed to be in DOT format. It will
be parsed and a Dot class will be returned,
representing the graph.
"""
return parser.parse_dot_data(data)
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
"""
fd = open(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data)
def graph_from_edges(edge_list, node_prefix='', directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
if isinstance(edge[1], str):
dst = node_prefix + edge[1]
else:
dst = node_prefix + str(edge[1])
e = Edge(src, dst)
graph.add_edge(e)
return graph
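# Usage sketch (assumed example):
#   g = graph_from_edges([(1, 2), (2, 3)], node_prefix='n', directed=True)
#   g.to_string()  # yields a digraph containing the edges n1 -> n2 and n2 -> n3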
def graph_from_adjacency_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip + 1
for e in r:
if e:
graph.add_edge(
Edge(
node_prefix + str(node_orig),
node_prefix + str(node_dest)
)
)
node_dest += 1
node_orig += 1
return graph
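# Usage sketch (assumed example): an undirected 2x2 adjacency matrix
#   graph_from_adjacency_matrix([[0, 1], [1, 0]], node_prefix='n')
# produces a single edge n1 -- n2, since only one of the symmetric halves
# of the matrix is walked.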
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c * node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
Edge(
node_prefix + str(abs(nodes[0])),
node_prefix + str(nodes[1])
)
)
if not directed:
graph.set_simplify(True)
return graph
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {
'dot': '',
'twopi': '',
'neato': '',
'circo': '',
'fdp': '',
'sfdp': ''
}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if os.path.isdir(path):
for prg in progs.keys():
if progs[prg]:
continue
if os.path.exists(os.path.join(path, prg)):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg) + '"'
else:
progs[prg] = os.path.join(path, prg)
success = True
elif os.path.exists(os.path.join(path, prg + '.exe')):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"'
else:
progs[prg] = os.path.join(path, prg + '.exe')
success = True
if success:
return progs
else:
return None
# The multi-platform version of this 'find_graphviz' function was
# contributed by Peter Cock
def find_graphviz():
"""Locate Graphviz's executables in the system.
Tries three methods:
First: Windows Registry (Windows only)
This requires Mark Hammond's pywin32 is installed.
Secondly: Search the path
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
Thirdly: Default install location (Windows only)
It will look for 'dot', 'twopi' and 'neato' in the default install
location under the "Program Files" directory.
It will return a dictionary containing the program names as keys
and their paths as values.
If this fails, it returns None.
"""
# Method 1 (Windows only)
if os.sys.platform == 'win32':
HKEY_LOCAL_MACHINE = 0x80000002
KEY_QUERY_VALUE = 0x0001
RegOpenKeyEx = None
RegQueryValueEx = None
RegCloseKey = None
try:
import win32api
RegOpenKeyEx = win32api.RegOpenKeyEx
RegQueryValueEx = win32api.RegQueryValueEx
RegCloseKey = win32api.RegCloseKey
except ImportError:
# Print a message suggesting they install these?
pass
try:
import ctypes
def RegOpenKeyEx(key, subkey, opt, sam):
result = ctypes.c_uint(0)
ctypes.windll.advapi32.RegOpenKeyExA(
key, subkey, opt, sam, ctypes.byref(result)
)
return result.value
def RegQueryValueEx(hkey, valuename):
data_type = ctypes.c_uint(0)
data_len = ctypes.c_uint(1024)
data = ctypes.create_string_buffer(1024)
# this has a return value, which we should probably check
ctypes.windll.advapi32.RegQueryValueExA(
hkey, valuename, 0, ctypes.byref(data_type),
data, ctypes.byref(data_len)
)
return data.value
RegCloseKey = ctypes.windll.advapi32.RegCloseKey
except ImportError:
# Print a message suggesting they install these?
pass
if RegOpenKeyEx is not None:
# Get the GraphViz install path from the registry
hkey = None
potentialKeys = [
"SOFTWARE\\ATT\\Graphviz",
"SOFTWARE\\AT&T Research Labs\\Graphviz"
]
for potentialKey in potentialKeys:
try:
hkey = RegOpenKeyEx(
HKEY_LOCAL_MACHINE,
potentialKey, 0, KEY_QUERY_VALUE
)
if hkey is not None:
path = RegQueryValueEx(hkey, "InstallPath")
RegCloseKey(hkey)
# The registry variable might exist, left by old
# installations but with no value, in those cases we
# keep searching...
if not path:
continue
# Now append the "bin" subdirectory:
path = os.path.join(path, "bin")
progs = __find_executables(path)
if progs is not None:
# print("Used Windows registry")
return progs
except Exception:
# raise
pass
else:
break
# Method 2 (Linux, Windows etc)
if 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
progs = __find_executables(path)
if progs is not None:
# print("Used path")
return progs
# Method 3 (Windows only)
if os.sys.platform == 'win32':
# Try and work out the equivalent of "C:\Program Files" on this
# machine (might be on drive D:, or in a different language)
if 'PROGRAMFILES' in os.environ:
# Note, we could also use the win32api to get this
# information, but win32api may not be installed.
path = os.path.join(
os.environ['PROGRAMFILES'], 'ATT', 'GraphViz', 'bin'
)
else:
# Just in case, try the default...
path = r"C:\Program Files\att\Graphviz\bin"
progs = __find_executables(path)
if progs is not None:
# print("Used default install location")
return progs
for path in (
'/usr/bin', '/usr/local/bin',
'/opt/local/bin',
'/opt/bin', '/sw/bin', '/usr/share',
'/Applications/Graphviz.app/Contents/MacOS/'):
progs = __find_executables(path)
if progs is not None:
# print("Used path")
return progs
# Failed to find GraphViz
return None
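# Illustrative note (assumed example): on a typical Linux install find_graphviz()
# returns something like
#   {'dot': '/usr/bin/dot', 'neato': '/usr/bin/neato', 'twopi': '/usr/bin/twopi',
#    'circo': '/usr/bin/circo', 'fdp': '/usr/bin/fdp', 'sfdp': '/usr/bin/sfdp'}
# and None when no Graphviz executables can be located.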
class Common(object):
"""Common information to several classes.
Should not be directly used, several classes are derived from
this one.
"""
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def __get_attribute__(self, attr):
"""Look for default attributes for this node"""
attr_val = self.obj_dict['attributes'].get(attr, None)
if attr_val is None:
# get the defaults for nodes/edges
default_node_name = self.obj_dict['type']
# The defaults for graphs are set on a node named 'graph'
if default_node_name in ('subgraph', 'digraph', 'cluster'):
default_node_name = 'graph'
g = self.get_parent_graph()
if g is not None:
defaults = g.get_node(default_node_name)
else:
return None
# Multiple defaults could be set by having repeated 'graph [...]'
# 'node [...]', 'edge [...]' statements. In such case, if the
# same attribute is set in different statements, only the first
# will be returned. In order to get all, one would call the
# get_*_defaults() methods and handle those. Or go node by node
# (of the ones specifying defaults) and modify the attributes
# individually.
#
if not isinstance(defaults, (list, tuple)):
defaults = [defaults]
for default in defaults:
attr_val = default.obj_dict['attributes'].get(attr, None)
if attr_val:
return attr_val
else:
return attr_val
return None
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
def get_parent_graph(self):
return self.obj_dict.get('parent_graph', None)
def set(self, name, value):
"""Set an attribute value by name.
Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
set_'name'(value)
which are defined for all the existing attributes.
"""
self.obj_dict['attributes'][name] = value
def get(self, name):
"""Get an attribute value by name.
Given an attribute 'name' it will get its value.
There's always the possibility of using the methods:
get_'name'()
which are defined for all the existing attributes.
"""
return self.obj_dict['attributes'].get(name, None)
def get_attributes(self):
""""""
return self.obj_dict['attributes']
def set_sequence(self, seq):
self.obj_dict['sequence'] = seq
def get_sequence(self):
return self.obj_dict['sequence']
def create_attribute_methods(self, obj_attributes):
# for attr in self.obj_dict['attributes']:
for attr in obj_attributes:
# Generate all the Setter methods.
#
self.__setattr__(
'set_' + attr,
lambda x, a=attr: self.obj_dict['attributes'].__setitem__(a, x)
)
# Generate all the Getter methods.
#
self.__setattr__(
'get_' + attr,
lambda a=attr: self.__get_attribute__(a)
)
class Error(Exception):
"""General error handling class.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InvocationException(Exception):
"""
To indicate that a problem occurred while running any of the GraphViz
executables.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class Node(Common):
"""A graph node.
This class represents a graph's node with all its attributes.
node(name, attribute=value, ...)
name: node's name
All the attributes defined in the Graphviz dot language should
be supported.
"""
def __init__(self, name='', obj_dict=None, **attrs):
#
# Nodes will take attributes of all other types because the defaults
# for any GraphViz object are dealt with as if they were Node
# definitions
#
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict['attributes'] = dict(attrs)
self.obj_dict['type'] = 'node'
self.obj_dict['parent_graph'] = None
self.obj_dict['parent_node_list'] = None
self.obj_dict['sequence'] = None
# Remove the compass point
#
port = None
if isinstance(name, basestring) and not name.startswith('"'):
idx = name.find(':')
if idx > 0 and idx + 1 < len(name):
name, port = name[:idx], name[idx:]
if isinstance(name, (long, int)):
name = str(name)
self.obj_dict['name'] = quote_if_necessary(name)
self.obj_dict['port'] = port
self.create_attribute_methods(NODE_ATTRIBUTES)
def set_name(self, node_name):
"""Set the node's name."""
self.obj_dict['name'] = node_name
def get_name(self):
"""Get the node's name."""
return self.obj_dict['name']
def get_port(self):
"""Get the node's port."""
return self.obj_dict['port']
def add_style(self, style):
styles = self.obj_dict['attributes'].get('style', None)
if not styles and style:
styles = [style]
else:
styles = styles.split(',')
styles.append(style)
self.obj_dict['attributes']['style'] = ','.join(styles)
def to_string(self):
"""Returns a string representation of the node in dot language.
"""
# RMF: special case defaults for node, edge and graph properties.
#
node = quote_if_necessary(self.obj_dict['name'])
node_attr = list()
for attr, value in sorted(
self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
node_attr.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
node_attr.append(attr)
# No point in having nodes setting any defaults if they don't set
# any attributes...
#
if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
return ''
node_attr = ', '.join(node_attr)
if node_attr:
node += ' [' + node_attr + ']'
return node + ';'
class Edge(Common):
"""A graph edge.
This class represents a graph's edge with all its attributes.
edge(src, dst, attribute=value, ...)
src: source node's name
dst: destination node's name
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_label, set_fontname
or directly by using the instance's special dictionary:
Edge.obj_dict['attributes'][attribute name], i.e.
edge_instance.obj_dict['attributes']['label']
edge_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, src='', dst='', obj_dict=None, **attrs):
if isinstance(src, (list, tuple)) and dst == '':
src, dst = src
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict['attributes'] = dict(attrs)
self.obj_dict['type'] = 'edge'
self.obj_dict['parent_graph'] = None
self.obj_dict['parent_edge_list'] = None
self.obj_dict['sequence'] = None
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
points = (quote_if_necessary(src), quote_if_necessary(dst))
self.obj_dict['points'] = points
self.create_attribute_methods(EDGE_ATTRIBUTES)
def get_source(self):
"""Get the edges source node name."""
return self.obj_dict['points'][0]
def get_destination(self):
"""Get the edge's destination node name."""
return self.obj_dict['points'][1]
def __hash__(self):
return hash(hash(self.get_source()) + hash(self.get_destination()))
def __eq__(self, edge):
"""Compare two edges.
If the parent graph is directed, edges are equal only
when source and destination match, so A->B != B->A.
If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A
"""
if not isinstance(edge, Edge):
raise Error("Can't compare and edge to a non-edge object.")
if self.get_parent_graph().get_top_graph_type() == 'graph':
# If the graph is undirected, the edge has neither
# source nor destination.
#
if ((self.get_source() == edge.get_source() and
self.get_destination() == edge.get_destination()) or
(edge.get_source() == self.get_destination() and
edge.get_destination() == self.get_source())):
return True
else:
if (self.get_source() == edge.get_source() and
self.get_destination() == edge.get_destination()):
return True
return False
def parse_node_ref(self, node_str):
if not isinstance(node_str, str):
return node_str
if node_str.startswith('"') and node_str.endswith('"'):
return node_str
node_port_idx = node_str.rfind(':')
if (node_port_idx > 0 and node_str[0] == '"' and
node_str[node_port_idx - 1] == '"'):
return node_str
if node_port_idx > 0:
a = node_str[:node_port_idx]
b = node_str[node_port_idx + 1:]
node = quote_if_necessary(a)
node += ':' + quote_if_necessary(b)
return node
return node_str
def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref(self.get_source())
dst = self.parse_node_ref(self.get_destination())
if isinstance(src, frozendict):
edge = [Subgraph(obj_dict=src).to_string()]
elif isinstance(src, (int, long)):
edge = [str(src)]
else:
edge = [src]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph'):
edge.append('->')
else:
edge.append('--')
if isinstance(dst, frozendict):
edge.append(Subgraph(obj_dict=dst).to_string())
elif isinstance(dst, (int, long)):
edge.append(str(dst))
else:
edge.append(dst)
edge_attr = list()
for attr, value in sorted(
self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
edge_attr.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
edge_attr.append(attr)
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append(' [' + edge_attr + ']')
return ' '.join(edge) + ';'
class Graph(Common):
"""Class representing a graph in Graphviz's dot language.
This class implements the methods to work on a representation
of a graph in Graphviz's dot language.
graph(graph_name='G', graph_type='digraph',
strict=False, suppress_disconnected=False, attribute=value, ...)
graph_name:
the graph's name
graph_type:
can be 'graph' or 'digraph'
suppress_disconnected:
defaults to False; when True, disconnected nodes
are removed from the graph.
simplify:
if True it will avoid displaying equal edges, i.e.
only one edge between two nodes, removing the
duplicated ones.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Graph.obj_dict['attributes'][attribute name], i.e.
graph_instance.obj_dict['attributes']['label']
graph_instance.obj_dict['attributes']['fontname']
"""
def __init__(
self, graph_name='G', obj_dict=None, graph_type='digraph',
strict=False, suppress_disconnected=False, simplify=False,
**attrs):
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
self.obj_dict['attributes'] = dict(attrs)
if graph_type not in ['graph', 'digraph']:
raise Error((
'Invalid type "%s". Accepted graph types are: '
'graph, digraph' % graph_type
))
self.obj_dict['name'] = quote_if_necessary(graph_name)
self.obj_dict['type'] = graph_type
self.obj_dict['strict'] = strict
self.obj_dict['suppress_disconnected'] = suppress_disconnected
self.obj_dict['simplify'] = simplify
self.obj_dict['current_child_sequence'] = 1
self.obj_dict['nodes'] = dict()
self.obj_dict['edges'] = dict()
self.obj_dict['subgraphs'] = dict()
self.set_parent_graph(self)
self.create_attribute_methods(GRAPH_ATTRIBUTES)
def get_graph_type(self):
return self.obj_dict['type']
def get_top_graph_type(self):
parent = self
while True:
parent_ = parent.get_parent_graph()
if parent_ == parent:
break
parent = parent_
return parent.obj_dict['type']
def set_graph_defaults(self, **attrs):
self.add_node(Node('graph', **attrs))
def get_graph_defaults(self, **attrs):
graph_nodes = self.get_node('graph')
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_node_defaults(self, **attrs):
self.add_node(Node('node', **attrs))
def get_node_defaults(self, **attrs):
graph_nodes = self.get_node('node')
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_edge_defaults(self, **attrs):
self.add_node(Node('edge', **attrs))
def get_edge_defaults(self, **attrs):
graph_nodes = self.get_node('edge')
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_simplify(self, simplify):
"""Set whether to simplify or not.
If True it will avoid displaying equal edges, i.e.
only one edge between two nodes, removing the
duplicated ones.
"""
self.obj_dict['simplify'] = simplify
def get_simplify(self):
"""Get whether to simplify or not.
Refer to set_simplify for more information.
"""
return self.obj_dict['simplify']
def set_type(self, graph_type):
"""Set the graph's type, 'graph' or 'digraph'."""
self.obj_dict['type'] = graph_type
def get_type(self):
"""Get the graph's type, 'graph' or 'digraph'."""
return self.obj_dict['type']
def set_name(self, graph_name):
"""Set the graph's name."""
self.obj_dict['name'] = graph_name
def get_name(self):
"""Get the graph's name."""
return self.obj_dict['name']
def set_strict(self, val):
"""Set graph to 'strict' mode.
This option is only valid for top level graphs.
"""
self.obj_dict['strict'] = val
def get_strict(self, val):
"""Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.
"""
return self.obj_dict['strict']
def set_suppress_disconnected(self, val):
"""Suppress disconnected nodes in the output graph.
This option will skip nodes in the graph with no incoming or outgoing
edges. This option works also for subgraphs and has effect only in the
current graph/subgraph.
"""
self.obj_dict['suppress_disconnected'] = val
def get_suppress_disconnected(self, val):
"""Get if suppress disconnected is set.
Refer to set_suppress_disconnected for more information.
"""
return self.obj_dict['suppress_disconnected']
def get_next_sequence_number(self):
seq = self.obj_dict['current_child_sequence']
self.obj_dict['current_child_sequence'] += 1
return seq
def add_node(self, graph_node):
"""Adds a node object to the graph.
It takes a node object as its only argument and returns
None.
"""
if not isinstance(graph_node, Node):
raise TypeError(
'add_node() received a non node '
'class object: {}'.format(str(graph_node))
)
node = self.get_node(graph_node.get_name())
if not node:
self.obj_dict['nodes'][graph_node.get_name()] = [
graph_node.obj_dict
]
# self.node_dict[graph_node.get_name()] = graph_node.attributes
graph_node.set_parent_graph(self.get_parent_graph())
else:
self.obj_dict['nodes'][graph_node.get_name()].append(
graph_node.obj_dict
)
graph_node.set_sequence(self.get_next_sequence_number())
def del_node(self, name, index=None):
"""Delete a node from the graph.
Given a node's name all node(s) with that same name
will be deleted if 'index' is not specified or set
to None.
If there are several nodes with that same name and
'index' is given, only the node in that position
will be deleted.
'index' should be an integer specifying the position
of the node to delete. If index is larger than the
number of nodes with that name, no action is taken.
If nodes are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(name, Node):
name = name.get_name()
if name in self.obj_dict['nodes']:
if index is not None and index < len(self.obj_dict['nodes'][name]):
del self.obj_dict['nodes'][name][index]
return True
else:
del self.obj_dict['nodes'][name]
return True
return False
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['nodes']:
match.extend([
Node(obj_dict=obj_dict)
for obj_dict
in self.obj_dict['nodes'][name]
])
return match
def get_nodes(self):
"""Get the list of Node instances."""
return self.get_node_list()
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
"""
node_objs = list()
for node, obj_dict_list in self.obj_dict['nodes'].items():
node_objs.extend([
Node(obj_dict=obj_d)
for obj_d
in obj_dict_list
])
return node_objs
def add_edge(self, graph_edge):
"""Adds an edge object to the graph.
It takes an edge object as its only argument and returns
None.
"""
if not isinstance(graph_edge, Edge):
raise TypeError(
'add_edge() received a non '
'edge class object: {}'.format(str(graph_edge))
)
edge_points = (graph_edge.get_source(), graph_edge.get_destination())
if edge_points in self.obj_dict['edges']:
edge_list = self.obj_dict['edges'][edge_points]
edge_list.append(graph_edge.obj_dict)
else:
self.obj_dict['edges'][edge_points] = [graph_edge.obj_dict]
graph_edge.set_sequence(self.get_next_sequence_number())
graph_edge.set_parent_graph(self.get_parent_graph())
def del_edge(self, src_or_list, dst=None, index=None):
"""Delete an edge from the graph.
Given an edge's (source, destination) node names all
matching edges(s) will be deleted if 'index' is not
specified or set to None.
If there are several matching edges and 'index' is
given, only the edge in that position will be deleted.
'index' should be an integer specifying the position
of the edge to delete. If index is larger than the
number of matching edges, no action is taken.
If edges are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(src_or_list, (list, tuple)):
if dst is not None and isinstance(dst, (int, long)):
index = dst
src, dst = src_or_list
else:
src, dst = src_or_list, dst
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
if (src, dst) in self.obj_dict['edges']:
if index is not None and index < len(
self.obj_dict['edges'][(src, dst)]):
del self.obj_dict['edges'][(src, dst)][index]
return True
else:
del self.obj_dict['edges'][(src, dst)]
return True
return False
def get_edge(self, src_or_list, dst=None):
"""Retrieved an edge from the graph.
Given an edge's source and destination the corresponding
Edge instance(s) will be returned.
If one or more edges exist with that source and destination
a list of Edge instances is returned.
An empty list is returned otherwise.
"""
if isinstance(src_or_list, (list, tuple)) and dst is None:
edge_points = tuple(src_or_list)
edge_points_reverse = (edge_points[1], edge_points[0])
else:
edge_points = (src_or_list, dst)
edge_points_reverse = (dst, src_or_list)
match = list()
if edge_points in self.obj_dict['edges'] or (
self.get_top_graph_type() == 'graph' and
edge_points_reverse in self.obj_dict['edges']):
edges_obj_dict = self.obj_dict['edges'].get(
edge_points,
self.obj_dict['edges'].get(edge_points_reverse, None))
for edge_obj_dict in edges_obj_dict:
match.append(Edge(
edge_points[0],
edge_points[1],
obj_dict=edge_obj_dict
))
return match
def get_edges(self):
return self.get_edge_list()
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for edge, obj_dict_list in self.obj_dict['edges'].items():
edge_objs.extend([
Edge(obj_dict=obj_d)
for obj_d
in obj_dict_list
])
return edge_objs
def add_subgraph(self, sgraph):
"""Adds an subgraph object to the graph.
It takes a subgraph object as its only argument and returns
None.
"""
if not isinstance(sgraph, Subgraph) and \
not isinstance(sgraph, Cluster):
raise TypeError(
'add_subgraph() received a non '
'subgraph class object: {}'.format(str(sgraph))
)
if sgraph.get_name() in self.obj_dict['subgraphs']:
sgraph_list = self.obj_dict['subgraphs'][sgraph.get_name()]
sgraph_list.append(sgraph.obj_dict)
else:
self.obj_dict['subgraphs'][sgraph.get_name()] = [sgraph.obj_dict]
sgraph.set_sequence(self.get_next_sequence_number())
sgraph.set_parent_graph(self.get_parent_graph())
def get_subgraph(self, name):
"""Retrieved a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['subgraphs']:
sgraphs_obj_dict = self.obj_dict['subgraphs'].get(name)
for obj_dict_list in sgraphs_obj_dict:
match.append(Subgraph(obj_dict=obj_dict_list))
return match
def get_subgraphs(self):
return self.get_subgraph_list()
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph, obj_dict_list in self.obj_dict['subgraphs'].items():
sgraph_objs.extend([
Subgraph(obj_dict=obj_d)
for obj_d
in obj_dict_list
])
return sgraph_objs
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
for obj_list in self.obj_dict['nodes'].values():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['edges'].values():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['subgraphs'].values():
for obj in obj_list:
Graph(obj_dict=obj).set_parent_graph(parent_graph)
def to_string(self):
"""Returns a string representation of the graph in dot language.
It will return the graph and all its subelements in string form.
"""
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self == self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if 'show_keyword' in self.obj_dict and \
self.obj_dict['show_keyword']:
graph.append('subgraph {\n')
else:
graph.append('{\n')
else:
graph.append(
'{} {} {{\n'.format(
self.obj_dict['type'],
self.obj_dict['name']
)
)
for attr, value in sorted(
self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
graph.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
graph.append(attr)
graph.append(';\n')
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].values():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = list(
zip(*[obj['points'] for obj in edge_obj_dicts])
)
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].values():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].values():
sgraph_obj_dicts.extend(sg)
obj_list = sorted([
(obj['sequence'], obj)
for obj
in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts)
],key=itemgetter(0))
for idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append(node.to_string() + '\n')
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append(edge.to_string() + '\n')
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append(sgraph.to_string() + '\n')
graph.append('}\n')
return ''.join(graph)
class Subgraph(Graph):
"""Class representing a subgraph in Graphviz's dot language.
This class implements the methods to work on a representation
of a subgraph in Graphviz's dot language.
subgraph(
graph_name='subG', suppress_disconnected=False, attribute=value, ...
)
graph_name:
the subgraph's name
suppress_disconnected:
defaults to False; when True, disconnected nodes
are removed from the subgraph.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Subgraph.obj_dict['attributes'][attribute name], i.e.
subgraph_instance.obj_dict['attributes']['label']
subgraph_instance.obj_dict['attributes']['fontname']
"""
# RMF: subgraph should have all the attributes of graph so it can be passed
# as a graph to all methods
#
def __init__(
self, graph_name='', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(
self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected,
simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
class Cluster(Graph):
"""Class representing a cluster in Graphviz's dot language.
This class implements the methods to work on a representation
of a cluster in Graphviz's dot language.
cluster(
graph_name='subG', suppress_disconnected=False, attribute=value, ...
)
graph_name:
the cluster's name (the string 'cluster_' will always be prepended)
suppress_disconnected:
defaults to False; when True, disconnected nodes
are removed from the cluster.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_color, set_fontname
or using the instance's attributes:
Cluster.obj_dict['attributes'][attribute name], i.e.
cluster_instance.obj_dict['attributes']['label']
cluster_instance.obj_dict['attributes']['fontname']
"""
def __init__(
self, graph_name='subG', obj_dict=None,
suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(
self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify,
**attrs
)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
self.obj_dict['name'] = 'cluster_' + graph_name
self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
def __init__(self, *argsl, **argsd):
Graph.__init__(self, *argsl, **argsd)
self.shape_files = list()
self.progs = None
self.formats = [
'canon', 'cmap', 'cmapx', 'cmapx_np', 'dia', 'dot',
'fig', 'gd', 'gd2', 'gif', 'hpgl', 'imap', 'imap_np', 'ismap',
'jpe', 'jpeg', 'jpg', 'mif', 'mp', 'pcl', 'pdf', 'pic', 'plain',
'plain-ext', 'png', 'ps', 'ps2', 'svg', 'svgz', 'vml', 'vmlz',
'vrml', 'vtx', 'wbmp', 'xdot', 'xlib'
]
self.prog = 'dot'
# Automatically creates all the methods enabling the creation
# of output in any of the supported formats.
for frmt in self.formats:
self.__setattr__(
'create_' + frmt,
lambda f=frmt, prog=self.prog: self.create(format=f, prog=prog)
)
f = self.__dict__['create_' + frmt]
f.__doc__ = (
'''Refer to the docstring accompanying the'''
''''create' method for more information.'''
)
for frmt in self.formats + ['raw']:
self.__setattr__(
'write_' + frmt,
lambda path,
f=frmt,
prog=self.prog: self.write(path, format=f, prog=prog)
)
f = self.__dict__['write_' + frmt]
f.__doc__ = (
'''Refer to the docstring accompanying the'''
''''write' method for more information.'''
)
def __getstate__(self):
return copy.copy(self.obj_dict)
def __setstate__(self, state):
self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
        If the graph needs graphic objects to be used as shapes or otherwise,
        those need to be in the same folder from which the graph is going to be
        rendered. Alternatively, the absolute path to the files can be
        specified when including the graphics in the graph.
The files in the location pointed to by the path(s) specified as
arguments to this method will be copied to the same temporary location
where the graph is going to be rendered.
"""
if isinstance(file_paths, basestring):
self.shape_files.append(file_paths)
if isinstance(file_paths, (list, tuple)):
self.shape_files.extend(file_paths)
def set_prog(self, prog):
"""Sets the default program.
Sets the default program in charge of processing
the dot file into a graph.
"""
self.prog = prog
def set_graphviz_executables(self, paths):
"""
        This method allows you to manually specify the location of the GraphViz
executables.
The argument to this method should be a dictionary where the keys are
as follows:
{'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': ''}
and the values are the paths to the corresponding executable,
including the name of the executable itself.
"""
self.progs = paths
def write(self, path, prog=None, format='raw'):
"""
        Given a filename 'path' it will open/create and truncate
        that file and write to it a representation of the graph
        defined by the dot object, in the format specified by
'format'. 'path' can also be an open file-like object, such as
a StringIO instance.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
        The output can be processed by any of the graphviz tools, defined
        in 'prog', which defaults to 'dot'.
Returns True or False according to the success of the write
operation.
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
"""
if prog is None:
prog = self.prog
fobj, close = get_fobj(path, 'w+b')
try:
if format == 'raw':
data = self.to_string()
if isinstance(data, basestring):
if not isinstance(data, unicode):
try:
data = unicode(data, 'utf-8')
except:
pass
try:
charset = self.get_charset()
if not PY3 or not charset:
charset = 'utf-8'
data = data.encode(charset)
except:
if PY3:
data = data.encode('utf-8')
pass
fobj.write(data)
else:
fobj.write(self.create(prog, format))
finally:
if close:
fobj.close()
return True
def create(self, prog=None, format='ps'):
"""Creates and returns a Postscript representation of the graph.
create will write the graph to a temporary dot file and process
        it with the program given by 'prog' (which defaults to 'dot'),
        reading the Postscript output and returning it as a string if the
        operation is successful.
On failure None is returned.
There's also the preferred possibility of using:
create_'format'(prog='program')
which are automatically defined for all the supported formats.
[create_ps(), create_gif(), create_dia(), ...]
        If 'prog' is a list instead of a string, the first item is expected
to be the program name, followed by any optional command-line
arguments for it:
['twopi', '-Tdot', '-s10']
"""
if prog is None:
prog = self.prog
if isinstance(prog, (list, tuple)):
prog, args = prog[0], prog[1:]
else:
args = []
if self.progs is None:
self.progs = find_graphviz()
if self.progs is None:
raise InvocationException(
'GraphViz\'s executables not found')
if prog not in self.progs:
raise InvocationException(
'GraphViz\'s executable "%s" not found' % prog)
if not os.path.exists(self.progs[prog]) or \
not os.path.isfile(self.progs[prog]):
raise InvocationException(
'GraphViz\'s executable "{}" is not'
' a file or doesn\'t exist'.format(self.progs[prog])
)
tmp_fd, tmp_name = tempfile.mkstemp()
os.close(tmp_fd)
self.write(tmp_name)
tmp_dir = os.path.dirname(tmp_name)
# For each of the image files...
for img in self.shape_files:
# Get its data
f = open(img, 'rb')
f_data = f.read()
f.close()
# And copy it under a file with the same name in the temporary
# directory
f = open(os.path.join(tmp_dir, os.path.basename(img)), 'wb')
f.write(f_data)
f.close()
cmdline = [self.progs[prog], '-T' + format, tmp_name] + args
p = subprocess.Popen(
cmdline,
cwd=tmp_dir,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stderr = p.stderr
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
stdout_output = NULL_SEP.join(stdout_output)
if not stderr.closed:
stderr_output = list()
while True:
data = stderr.read()
if not data:
break
stderr_output.append(data)
stderr.close()
if stderr_output:
stderr_output = NULL_SEP.join(stderr_output)
if PY3:
stderr_output = stderr_output.decode(sys.stderr.encoding)
# pid, status = os.waitpid(p.pid, 0)
status = p.wait()
if status != 0:
raise InvocationException(
'Program terminated with status: %d. stderr follows: %s' % (
status, stderr_output))
elif stderr_output:
print(stderr_output)
# For each of the image files...
for img in self.shape_files:
# remove it
os.unlink(os.path.join(tmp_dir, os.path.basename(img)))
os.unlink(tmp_name)
return stdout_output
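# A usage sketch of the output methods described in the 'write' and 'create'
# docstrings above (hedged; Node, Edge and the add_node/add_edge helpers are
# assumed to be defined earlier in this module, as in pydot):
#
#     graph = Dot(graph_type='digraph')
#     graph.add_node(Node('a'))
#     graph.add_node(Node('b'))
#     graph.add_edge(Edge('a', 'b'))
#     graph.write_png('example.png')               # equivalent to graph.write('example.png', format='png')
#     svg_bytes = graph.create(prog='dot', format='svg')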
|
[] |
[] |
[
"PATH",
"PROGRAMFILES"
] |
[]
|
["PATH", "PROGRAMFILES"]
|
python
| 2 | 0 | |
train.py
|
# -*- coding: utf-8 -*-
# file: train.py
# author: songyouwei <[email protected]>
# Copyright (C) 2018. All Rights Reserved.
import logging
import argparse
import math
import os
import sys
import random
import numpy
import pickle
import pandas as pd
from sklearn import metrics
from time import strftime, localtime
from transformers import BertModel
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from data_utils import build_tokenizer, build_embedding_matrix, Tokenizer4Bert, ABSADataset
from models import LSTM, IAN, MemNet, RAM, TD_LSTM, TC_LSTM, Cabasc, ATAE_LSTM, TNet_LF, AOA, MGAN, ASGCN, LCF_BERT
from models.aen import CrossEntropyLoss_LSR, AEN_BERT
from models.bert_spc import BERT_SPC
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class Instructor:
def __init__(self, opt):
self.opt = opt
if 'bert' in opt.model_name:
tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
bert = BertModel.from_pretrained(opt.pretrained_bert_name)
self.model = opt.model_class(bert, opt).to(opt.device)
else:
tokenizer = build_tokenizer(
fnames=[opt.dataset_file['train'], opt.dataset_file['test']],
max_seq_len=opt.max_seq_len,
dat_fname='{0}_tokenizer.dat'.format(opt.dataset))
embedding_matrix = build_embedding_matrix(
word2idx=tokenizer.word2idx,
embed_dim=opt.embed_dim,
dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(opt.embed_dim), opt.dataset))
self.model = opt.model_class(embedding_matrix, opt).to(opt.device)
self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)
self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)
assert 0 <= opt.valset_ratio < 1
if opt.valset_ratio > 0:
valset_len = int(len(self.trainset) * opt.valset_ratio)
self.trainset, self.valset = random_split(self.trainset, (len(self.trainset)-valset_len, valset_len))
else:
self.valset = self.testset
if opt.device.type == 'cuda':
logger.info('cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=opt.device.index)))
self._print_args()
def _print_args(self):
n_trainable_params, n_nontrainable_params = 0, 0
for p in self.model.parameters():
n_params = torch.prod(torch.tensor(p.shape))
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
logger.info('> n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
logger.info('> training arguments:')
for arg in vars(self.opt):
logger.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
def _reset_params(self):
for child in self.model.children():
if type(child) != BertModel: # skip bert params
for p in child.parameters():
if p.requires_grad:
if len(p.shape) > 1:
self.opt.initializer(p)
else:
stdv = 1. / math.sqrt(p.shape[0])
torch.nn.init.uniform_(p, a=-stdv, b=stdv)
def _train(self, criterion, optimizer, train_data_loader, val_data_loader):
max_val_acc = 0
max_val_f1 = 0
max_val_epoch = 0
global_step = 0
path = None
for i_epoch in range(self.opt.num_epoch):
logger.info('>' * 100)
logger.info('epoch: {}'.format(i_epoch))
n_correct, n_total, loss_total = 0, 0, 0
# switch model to training mode
self.model.train()
for i_batch, batch in enumerate(train_data_loader):
global_step += 1
# clear gradient accumulators
optimizer.zero_grad()
inputs = [batch[col].to(self.opt.device) for col in self.opt.inputs_cols]
outputs = self.model(inputs)
targets = batch['polarity'].to(self.opt.device)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_total += len(outputs)
loss_total += loss.item() * len(outputs)
if global_step % self.opt.log_step == 0:
train_acc = n_correct / n_total
train_loss = loss_total / n_total
logger.info('loss: {:.4f}, acc: {:.4f}'.format(train_loss, train_acc))
val_acc, val_f1 = self._evaluate_acc_f1(val_data_loader)
logger.info('> val_acc: {:.4f}, val_f1: {:.4f}'.format(val_acc, val_f1))
if val_acc > max_val_acc:
max_val_acc = val_acc
max_val_epoch = i_epoch
if not os.path.exists('state_dict'):
os.mkdir('state_dict')
path = 'state_dict/{0}_{1}_val_acc_{2}'.format(self.opt.model_name, self.opt.dataset, round(val_acc, 4))
torch.save(self.model.state_dict(), path)
logger.info('>> saved: {}'.format(path))
if val_f1 > max_val_f1:
max_val_f1 = val_f1
if i_epoch - max_val_epoch >= self.opt.patience:
print('>> early stop.')
break
return path
def _evaluate_acc_f1(self, data_loader):
n_correct, n_total = 0, 0
t_targets_all, t_outputs_all = None, None
preds = []
ground_truth = []
probs_0 = []
probs_1 = []
# switch model to evaluation mode
self.model.eval()
with torch.no_grad():
for i_batch, t_batch in enumerate(data_loader):
t_inputs = [t_batch[col].to(self.opt.device) for col in self.opt.inputs_cols]
t_targets = t_batch['polarity'].to(self.opt.device)
t_outputs = self.model(t_inputs)
n_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
n_total += len(t_outputs)
preds.append(torch.argmax(t_outputs, -1))
probs_0.append(t_outputs[:,0])
probs_1.append(t_outputs[:,1])
ground_truth.append(t_targets)
if t_targets_all is None:
t_targets_all = t_targets
t_outputs_all = t_outputs
else:
t_targets_all = torch.cat((t_targets_all, t_targets), dim=0)
t_outputs_all = torch.cat((t_outputs_all, t_outputs), dim=0)
acc = n_correct / n_total
        # drop label 2 from labels=[0, 1, 2] since this is a two-class problem
f1 = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0, 1], average='macro')
preds_np = torch.cat(preds, dim=0).cpu().numpy()
ground_truth_np = torch.cat(ground_truth, dim=0).cpu().numpy()
probs_0_np = torch.cat(probs_0, dim=0).cpu().numpy()
probs_1_np = torch.cat(probs_1, dim=0).cpu().numpy()
df_preds = pd.DataFrame({'prediction': preds_np, 'ground_truth': ground_truth_np, 'probs_0': probs_0_np, 'probs_1': probs_1_np})
df_preds.to_csv("test_predictions.csv")
#test_preds = [preds, ground_truth]
#with open('test_predictions.pkl', 'wb') as fh:
# pickle.dump(test_preds, fh)
return acc, f1
def run(self):
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
_params = filter(lambda p: p.requires_grad, self.model.parameters())
optimizer = self.opt.optimizer(_params, lr=self.opt.lr, weight_decay=self.opt.l2reg)
train_data_loader = DataLoader(dataset=self.trainset, batch_size=self.opt.batch_size, shuffle=True)
test_data_loader = DataLoader(dataset=self.testset, batch_size=self.opt.batch_size, shuffle=False)
val_data_loader = DataLoader(dataset=self.valset, batch_size=self.opt.batch_size, shuffle=False)
self._reset_params()
best_model_path = self._train(criterion, optimizer, train_data_loader, val_data_loader)
self.model.load_state_dict(torch.load(best_model_path))
test_acc, test_f1 = self._evaluate_acc_f1(test_data_loader)
logger.info('>> test_acc: {:.4f}, test_f1: {:.4f}'.format(test_acc, test_f1))
def main():
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='bert_spc', type=str)
parser.add_argument('--dataset', default='wmp_presidential', type=str, help='twitter, restaurant, laptop, wmp_presidential')
parser.add_argument('--optimizer', default='adam', type=str)
parser.add_argument('--initializer', default='xavier_uniform_', type=str)
parser.add_argument('--lr', default=2e-5, type=float, help='try 5e-5, 2e-5 for BERT, 1e-3 for others')
parser.add_argument('--dropout', default=0.1, type=float)
parser.add_argument('--l2reg', default=0.01, type=float)
parser.add_argument('--num_epoch', default=20, type=int, help='try larger number for non-BERT models')
parser.add_argument('--batch_size', default=16, type=int, help='try 16, 32, 64 for BERT models')
parser.add_argument('--log_step', default=10, type=int)
parser.add_argument('--embed_dim', default=300, type=int)
parser.add_argument('--hidden_dim', default=300, type=int)
parser.add_argument('--bert_dim', default=768, type=int)
parser.add_argument('--pretrained_bert_name', default='bert-base-uncased', type=str)
parser.add_argument('--max_seq_len', default=85, type=int)
parser.add_argument('--polarities_dim', default=2, type=int)
parser.add_argument('--hops', default=3, type=int)
parser.add_argument('--patience', default=5, type=int)
parser.add_argument('--device', default=None, type=str, help='e.g. cuda:0')
parser.add_argument('--seed', default=1234, type=int, help='set seed for reproducibility')
parser.add_argument('--valset_ratio', default=0, type=float, help='set ratio between 0 and 1 for validation support')
# The following parameters are only valid for the lcf-bert model
parser.add_argument('--local_context_focus', default='cdm', type=str, help='local context focus mode, cdw or cdm')
parser.add_argument('--SRD', default=3, type=int, help='semantic-relative-distance, see the paper of LCF-BERT model')
opt = parser.parse_args()
if opt.seed is not None:
random.seed(opt.seed)
numpy.random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ['PYTHONHASHSEED'] = str(opt.seed)
model_classes = {
'lstm': LSTM,
'td_lstm': TD_LSTM,
'tc_lstm': TC_LSTM,
'atae_lstm': ATAE_LSTM,
'ian': IAN,
'memnet': MemNet,
'ram': RAM,
'cabasc': Cabasc,
'tnet_lf': TNet_LF,
'aoa': AOA,
'mgan': MGAN,
'asgcn': ASGCN,
'bert_spc': BERT_SPC,
'aen_bert': AEN_BERT,
'lcf_bert': LCF_BERT,
        # default hyper-parameters for the LCF-BERT model are as follows:
# lr: 2e-5
# l2: 1e-5
# batch size: 16
# num epochs: 5
}
dataset_files = {
'twitter': {
'train': './datasets/acl-14-short-data/train.raw',
'test': './datasets/acl-14-short-data/test.raw'
},
'restaurant': {
'train': './datasets/semeval14/Restaurants_Train.xml.seg',
'test': './datasets/semeval14/Restaurants_Test_Gold.xml.seg'
},
'laptop': {
'train': './datasets/semeval14/Laptops_Train.xml.seg',
'test': './datasets/semeval14/Laptops_Test_Gold.xml.seg'
},
'wmp_presidential': {
'train': './datasets/wmp/wmp_presidential_train.xml.seg',
'test': './datasets/wmp/wmp_presidential_test.xml.seg'
}
}
input_colses = {
'lstm': ['text_indices'],
'td_lstm': ['left_with_aspect_indices', 'right_with_aspect_indices'],
'tc_lstm': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],
'atae_lstm': ['text_indices', 'aspect_indices'],
'ian': ['text_indices', 'aspect_indices'],
'memnet': ['context_indices', 'aspect_indices'],
'ram': ['text_indices', 'aspect_indices', 'left_indices'],
'cabasc': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],
'tnet_lf': ['text_indices', 'aspect_indices', 'aspect_boundary'],
'aoa': ['text_indices', 'aspect_indices'],
'mgan': ['text_indices', 'aspect_indices', 'left_indices'],
'asgcn': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
'bert_spc': ['concat_bert_indices', 'concat_segments_indices'],
'aen_bert': ['text_bert_indices', 'aspect_bert_indices'],
'lcf_bert': ['concat_bert_indices', 'concat_segments_indices', 'text_bert_indices', 'aspect_bert_indices'],
}
initializers = {
'xavier_uniform_': torch.nn.init.xavier_uniform_,
'xavier_normal_': torch.nn.init.xavier_normal_,
'orthogonal_': torch.nn.init.orthogonal_,
}
optimizers = {
'adadelta': torch.optim.Adadelta, # default lr=1.0
'adagrad': torch.optim.Adagrad, # default lr=0.01
'adam': torch.optim.Adam, # default lr=0.001
'adamax': torch.optim.Adamax, # default lr=0.002
'asgd': torch.optim.ASGD, # default lr=0.01
'rmsprop': torch.optim.RMSprop, # default lr=0.01
'sgd': torch.optim.SGD,
}
opt.model_class = model_classes[opt.model_name]
opt.dataset_file = dataset_files[opt.dataset]
opt.inputs_cols = input_colses[opt.model_name]
opt.initializer = initializers[opt.initializer]
opt.optimizer = optimizers[opt.optimizer]
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
if opt.device is None else torch.device(opt.device)
log_file = '{}-{}-{}.log'.format(opt.model_name, opt.dataset, strftime("%y%m%d-%H%M", localtime()))
logger.addHandler(logging.FileHandler(log_file))
ins = Instructor(opt)
ins.run()
if __name__ == '__main__':
main()
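# Example invocations (hedged sketch; the dataset files must exist under
# ./datasets as configured in dataset_files above):
#
#   python train.py --model_name bert_spc --dataset wmp_presidential --lr 2e-5 --batch_size 16
#   python train.py --model_name lstm --dataset restaurant --lr 1e-3 --num_epoch 50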
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
test/integration/sonobuoy_integration_test.go
|
// +build integration
package integration
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
)
const (
defaultSonobuoyPath = "../../sonobuoy"
bash = "/bin/bash"
defaultTestTimeout = 2 * time.Minute
)
var (
// Path to the Sonobuoy CLI
sonobuoy string
)
func findSonobuoyCLI() (string, error) {
sonobuoyPath := os.Getenv("SONOBUOY_CLI")
if sonobuoyPath == "" {
sonobuoyPath = defaultSonobuoyPath
}
if _, err := os.Stat(sonobuoyPath); os.IsNotExist(err) {
return "", err
}
return sonobuoyPath, nil
}
// runSonobuoyCommandWithContext runs the Sonobuoy CLI with the given context and arguments.
// It returns any encountered error and the stdout and stderr from the command execution.
func runSonobuoyCommandWithContext(ctx context.Context, t *testing.T, args string) (bytes.Buffer, bytes.Buffer, error) {
var stdout, stderr bytes.Buffer
command := exec.CommandContext(ctx, sonobuoy, strings.Fields(args)...)
command.Stdout = &stdout
command.Stderr = &stderr
t.Logf("Running %q\n", command.String())
return stdout, stderr, command.Run()
}
func mustRunSonobuoyCommand(t *testing.T, args string) bytes.Buffer {
return mustRunSonobuoyCommandWithContext(context.Background(), t, args)
}
// mustRunSonobuoyCommandWithContext runs the Sonobuoy CLI with the given context and arguments.
// It returns stdout and fails the test immediately if there are any errors.
func mustRunSonobuoyCommandWithContext(ctx context.Context, t *testing.T, args string) bytes.Buffer {
var stdout, stderr bytes.Buffer
command := exec.CommandContext(ctx, sonobuoy, strings.Fields(args)...)
command.Stdout = &stdout
command.Stderr = &stderr
t.Logf("Running %q\n", command.String())
if err := command.Run(); err != nil {
t.Fatalf("Expected %q to not error but got error: %q with stdout: %q and stderr: %q", args, err, stdout.String(), stderr.String())
}
return stdout
}
// runSonobuoyCommand runs the Sonobuoy CLI with the given arguments and a background context.
// It returns any encountered error and the stdout and stderr from the command execution.
func runSonobuoyCommand(t *testing.T, args string) (bytes.Buffer, bytes.Buffer, error) {
return runSonobuoyCommandWithContext(context.Background(), t, args)
}
// getNamespace returns the namespace to use for the current test and a function to clean it up
// asynchronously afterwards.
func getNamespace(t *testing.T) (string, func()) {
ns := "sonobuoy-" + strings.ToLower(t.Name())
return ns, func() { cleanup(t, ns) }
}
// cleanup runs sonobuoy delete for the given namespace. If no namespace is provided, it will
// omit the namespace argument and use the default.
func cleanup(t *testing.T, namespace string) {
args := "delete"
if namespace != "" {
args += " -n " + namespace
}
stdout, stderr, err := runSonobuoyCommand(t, args)
if err != nil {
t.Logf("Error encountered during cleanup: %q\n", err)
t.Log(stdout.String())
t.Log(stderr.String())
}
}
func TestUseNamespaceFromManifest(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
ns, cleanup := getNamespace(t)
defer cleanup()
genArgs := fmt.Sprintf("gen -p testImage/yaml/job-junit-passing-singlefile.yaml -n %v", ns)
genStdout := mustRunSonobuoyCommandWithContext(ctx, t, genArgs)
// Write the contents of gen to a temp file
tmpfile, err := ioutil.TempFile("", "gen.*.yaml")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpfile.Name()) // clean up
if _, err := tmpfile.Write(genStdout.Bytes()); err != nil {
t.Fatal(err)
}
if err := tmpfile.Close(); err != nil {
t.Fatal(err)
}
// Pass the gen output to sonobuoy run
runArgs := fmt.Sprintf("run --wait -f %v", tmpfile.Name())
mustRunSonobuoyCommandWithContext(ctx, t, runArgs)
}
// TestSimpleRun runs a simple plugin to check that it runs successfully
func TestSimpleRun(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
ns, cleanup := getNamespace(t)
defer cleanup()
args := fmt.Sprintf("run --image-pull-policy IfNotPresent --wait -p testImage/yaml/job-junit-passing-singlefile.yaml -n %v", ns)
mustRunSonobuoyCommandWithContext(ctx, t, args)
}
// TestQuick runs a real "--mode quick" check against the cluster to ensure that it passes.
func TestQuick(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
defer cancel()
ns, cleanup := getNamespace(t)
defer cleanup()
args := fmt.Sprintf("run --image-pull-policy IfNotPresent --wait --mode=quick -n %v", ns)
mustRunSonobuoyCommandWithContext(ctx, t, args)
checkStatusForPluginErrors(ctx, t, ns, "e2e", 0)
tb := mustDownloadTarball(ctx, t, ns)
tb = saveToArtifacts(t, tb)
checkTarballPluginForErrors(t, tb, "e2e", 0)
}
func checkStatusForPluginErrors(ctx context.Context, t *testing.T, ns, plugin string, failCount int) {
var expectVals []string
switch {
case failCount == 0:
expectVals = []string{
`"status":"complete","result-status":"passed"`,
`"passed":1`,
}
case failCount > 0:
expectVals = []string{
`"status":"complete","result-status":"failed"`,
fmt.Sprintf(`"failed":%v`, failCount),
}
case failCount < 0:
t.Fatalf("failCount < 0 not permitted; expected >=0, got %v", failCount)
}
args := fmt.Sprintf(`status --json -n %v`, ns)
out := mustRunSonobuoyCommandWithContext(ctx, t, args)
for _, v := range expectVals {
if !strings.Contains(out.String(), v) {
t.Errorf("Expected output of %q to contain %q but output was %v", args, v, out.String())
}
}
}
func mustDownloadTarball(ctx context.Context, t *testing.T, ns string) string {
args := fmt.Sprintf("retrieve -n %v", ns)
tarballName := mustRunSonobuoyCommandWithContext(ctx, t, args)
t.Logf("Tarball downloaded to: %v", tarballName.String())
return strings.TrimSpace(tarballName.String())
}
// checkPluginForErrors runs multiple checks to ensure that failCount errors occurred for the given
// plugin. Ensures that all our different reporting methods are in agreement.
func checkTarballPluginForErrors(t *testing.T, tarball, plugin string, failCount int) {
if plugin == "e2e" {
expectOut := fmt.Sprintf("failed tests: %v", failCount)
args := fmt.Sprintf("e2e %v ", tarball)
out := mustRunSonobuoyCommand(t, args)
if !strings.Contains(out.String(), expectOut) {
t.Errorf("Expected output of %q to contain %q but output was %v", args, expectOut, out.String())
}
}
expectOut := fmt.Sprintf("Failed: %v", failCount)
args := fmt.Sprintf("results %v --plugin %v", tarball, plugin)
out := mustRunSonobuoyCommand(t, args)
if !strings.Contains(out.String(), expectOut) {
t.Errorf("Expected output of %q to contain %q but output was %v", args, expectOut, out.String())
}
}
func saveToArtifacts(t *testing.T, p string) (newPath string) {
artifactsDir := os.Getenv("ARTIFACTS_DIR")
if artifactsDir == "" {
t.Logf("Skipping saving artifact %v since ARTIFACTS_DIR is unset.", p)
}
artifactFile := filepath.Join(artifactsDir, filepath.Base(p))
origFile := filepath.Join(pwd(t), filepath.Base(p))
if err := os.MkdirAll(artifactsDir, 0755); err != nil {
t.Logf("Error creating directory %v: %v", artifactsDir, err)
return p
}
var stdout, stderr bytes.Buffer
// Shell out to `mv` instead of using os.Rename(); the latter caused a problem due to files being on different devices.
cmd := exec.CommandContext(context.Background(), bash, "-c", fmt.Sprintf("mv %v %v", origFile, artifactFile))
cmd.Stdout = &stdout
cmd.Stderr = &stderr
t.Logf("Running %q\n", cmd.String())
if err := cmd.Run(); err != nil {
t.Logf("Error saving tarball to artifacts directory: %v", err)
t.Logf(" stdout: %v stderr: %v", stdout.String(), stderr.String())
return p
}
t.Logf("Moved tarball from %q to %q for artifact preservation", origFile, artifactFile)
return artifactFile
}
// TestSonobuoyVersion checks that all fields in the output from `version` are non-empty
func TestSonobuoyVersion(t *testing.T) {
stdout := mustRunSonobuoyCommand(t, "version")
lines := strings.Split(stdout.String(), "\n")
for _, line := range lines {
versionComponents := strings.Split(line, ":")
// If a Kubeconfig is not provided, a warning is included that the API version check is skipped.
// Only check lines where a split on ":" actually happened.
if len(versionComponents) == 2 && strings.TrimSpace(versionComponents[1]) == "" {
t.Errorf("expected value for %v to be set, but was empty", versionComponents[0])
}
}
}
func TestMain(m *testing.M) {
var err error
sonobuoy, err = findSonobuoyCLI()
if err != nil {
fmt.Printf("Skipping integration tests: failed to find sonobuoy CLI: %v\n", err)
os.Exit(1)
}
result := m.Run()
os.Exit(result)
}
func pwd(t *testing.T) string {
pwd, err := os.Getwd()
if err != nil {
t.Fatalf("Unable to get pwd: %v", err)
}
return pwd
}
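// TestRunSketch is a hedged illustration (not part of the original suite) of how
// the helpers above compose: acquire a namespace, run a plugin with a timeout,
// and rely on the deferred cleanup. The plugin manifest path is hypothetical.
func TestRunSketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	ns, cleanup := getNamespace(t)
	defer cleanup()
	args := fmt.Sprintf("run --wait -p testImage/yaml/example-plugin.yaml -n %v", ns)
	mustRunSonobuoyCommandWithContext(ctx, t, args)
}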
|
[
"\"SONOBUOY_CLI\"",
"\"ARTIFACTS_DIR\""
] |
[] |
[
"SONOBUOY_CLI",
"ARTIFACTS_DIR"
] |
[]
|
["SONOBUOY_CLI", "ARTIFACTS_DIR"]
|
go
| 2 | 0 | |
libpermian/pipeline/__init__.py
|
import copy
import os
import threading
import logging
from tplib.library import Library
from ..settings import Settings
from ..events.factory import EventFactory
from ..testruns import TestRuns
from ..webui import WebUI
from .. import hooks
from . import library_repo
LOGGER = logging.getLogger(__name__)
def run_pipeline(event, settings_paths, overrides, env=None):
"""
Start the pipeline with provided pipeline parameters.
For more information see Pipeline constructor and run methods.
:param event: JSON encoded event definition
:type event: str
    :param settings_paths: Paths to settings files independent of the event (main pipeline settings)
:type settings_paths: list
:param overrides: Direct overrides of settings values, for more details see libpermian.settings.Settings
:type overrides: dict
    :param env: Alternative environment variables to be used instead of os.environ
:type env: dict, optional
"""
pipeline = Pipeline(event, settings_paths, overrides, env)
pipeline.run()
return pipeline.return_code
def get_caserunconfigurations(event, settings_paths, overrides, env=None):
"""
Instead of running the full pipeline, only generate CaseRunConfigurations
that would be executed for provided event using provided settings.
Uses the same arguments as run_pipeline
"""
pipeline = Pipeline(event, settings_paths, overrides, env)
pipeline._cloneLibrary()
return pipeline.event.generate_caseRunConfigurations(pipeline.library)
class Pipeline():
"""
Create pipeline object providing all essential information required for the
pipeline execution. When ready, the pipeline can be executed only once and
    if another execution is needed, one should create a new pipeline object. Note
    that plugins can still "leave a mess" behind after a previous execution and the
    pipeline cannot guarantee this does not happen, so it's much safer to create
    only one pipeline object and exit the process after the pipeline finishes.
:param event: JSON encoded event definition
:type event: str
    :param settings_paths: Paths to settings files independent of the event (main pipeline settings)
:type settings_paths: list
:param overrides: Direct overrides of settings values, for more details see libpermian.settings.Settings
:type overrides: dict
    :param env: Alternative environment variables to be used instead of os.environ
:type env: dict, optional
"""
def __init__(self, event, settings_paths, overrides, env=None):
if env is None:
env = copy.copy(os.environ)
self.settings = Settings(overrides, env, settings_paths)
self.event = EventFactory.make(self.settings, event)
self.library = None
self.testRuns = None
self.executed = False
self.webUI = None
self.return_code = 0
def _checkThreads(self):
"""
Make sure that the pipeline is run from the main thread and there are
        no other threads; in other words, make sure we're alone so that waiting
        for threads at the end of the pipeline will work.
"""
if threading.current_thread() != threading.main_thread():
raise Exception('The pipeline has to be executed from the main thread')
if [threading.current_thread()] != threading.enumerate():
raise Exception('There are other threads active')
def run(self):
"""
        This is the main pipeline method which handles all the orchestration.
        When it returns, all pipeline related activities (except daemon
        threads) will have finished.
"""
LOGGER.debug('Starting pipeline')
self._checkThreads()
if self.executed:
raise Exception('The pipeline can be executed only once')
self.executed = True
LOGGER.debug('Starting WebUI')
self._startWebUI()
LOGGER.debug('WebUI started')
self._cloneLibrary()
LOGGER.debug('Making test runs')
self._makeTestRuns()
LOGGER.debug('Preparing reporting')
self._prepareReporting()
LOGGER.debug('Preparing workflows')
self._prepareWorkflows()
LOGGER.debug('Running workflows')
self._runWorkflows()
LOGGER.debug('Waiting for workflows to finish')
self._set_return_code(self._waitForWorkflows(), 1)
LOGGER.debug('Waiting for other threads to finish')
self._waitForThreads()
LOGGER.info('All execution and reporting is done. Performing other post-reporting and shutdown activities.')
LOGGER.debug('Running pipeline_ended handlers')
hooks.builtin.pipeline_ended(self)
LOGGER.debug('Waiting for other (post) threads to finish')
self._waitForThreads() # wait for any possible threads started by the final hook
def _set_return_code(self, succeeded, rc):
if not succeeded:
self.return_code |= rc
def _startWebUI(self):
"""
Start WebUI daemon thread and start providing the pipeline status over
HTTP.
"""
self.webUI = WebUI(self)
self.webUI.start()
self.webUI.waitUntilStarted()
def _cloneLibrary(self, target_directory=None):
"""
Clone repository containing testplans, requirements and testcases and
store them in :py:attr:`library` attribute using
:py:class:`tplib.Library`.
"""
try:
# first try direct specification of path to library
target_directory = self.settings.get('library', 'directPath')
except KeyError:
target_directory = library_repo.clone(target_directory, self.event, self.settings)
self.settings.load_from_library(target_directory)
self.library = Library(target_directory,
additional_testplans=self.event.additional_testplans_data,
additional_requirements=self.event.additional_requrements_data,
additional_testcases=self.event.additional_testcases_data)
def _makeTestRuns(self):
"""
Create TestRuns instance preparing prescriptions for execution and
reporting.
"""
self.testRuns = TestRuns(self.library, self.event, self.settings)
def _prepareReporting(self):
"""
Create ResultRouter instance and have all the ResultSender instances
ready for the workflows to send results.
"""
pass
def _prepareWorkflows(self):
"""
Create all workflow instances for the TestRuns instance.
"""
pass
def _runWorkflows(self):
"""
        Start the workflows associated with the TestRuns.
Note: This method calls hook which signals the pipeline has started the
execution.
"""
self.testRuns.start()
def _waitForWorkflows(self):
"""
        Wait until all the workflows associated with the TestRuns have ended.
Note: This method calls hook which signals the pipeline has finished the
execution.
"""
return self.testRuns.wait()
def _waitForThreads(self):
current_thread = threading.current_thread()
for thread in threading.enumerate():
if thread == current_thread:
continue
if thread.daemon:
continue
thread.join()
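# A minimal invocation sketch for run_pipeline (hedged; the event JSON, settings
# path and override values below are hypothetical examples, not fixtures shipped
# with the project):
#
#     event_json = '{"type": "example", "example": {}}'
#     rc = run_pipeline(
#         event_json,
#         settings_paths=['/etc/permian/settings.ini'],
#         overrides={'library': {'directPath': '/srv/tplib-library'}},
#     )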
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sandbox/config.py
|
"""
config --- NZMATH configurations
"""
import os
import sys
import warnings
WINDOWS_PLATFORMS = ('win32', 'win64', 'cli', )
# ----------------
# Default Settings
# ----------------
#
# Dependencies
# ============
#
# Some third party / platform dependent modules are possibly used,
# and they are configurable.
#
# mpmath
# ------
#
# mpmath is a package providing multiprecision math.
# http://code.google.com/p/mpmath
# This package is used in ecpp module.
#
# If you have mpmath installed, set as the following:
#HAVE_MPMATH = True
#CHECK_MPMATH = False
# Or, if you don't have mpmath installed, set as the following:
#HAVE_MPMATH = False
#CHECK_MPMATH = False
# The default values mean "I don't know; check it later":
HAVE_MPMATH = None
CHECK_MPMATH = True
#
# sqlite3
# -------
#
# sqlite3 is the default database module for Python, but it needs to be
# enabled at build time.
#
# If you have sqlite3 available, set as the following:
#HAVE_SQLITE3 = True
#CHECK_SQLITE3 = False
# Or, if you don't have sqlite3, set as the following:
#HAVE_SQLITE3 = False
#CHECK_SQLITE3 = False
# The default values mean "I don't know; check it later":
HAVE_SQLITE3 = None
CHECK_SQLITE3 = True
#
# net availability
# ----------------
#
# Some functions will connect to the Net (not yet; this is a sample.)
# Desktop machines are usually connected to the Net, but notebooks
# may have connectivity only occasionally.
#
# If you have net connectivity now, set as the following:
#HAVE_NET = True
#CHECK_NET = False
# Or, if your machine is not connected, set as the following:
#HAVE_NET = False
#CHECK_NET = False
# The default values mean "I don't know; check it later":
HAVE_NET = None
CHECK_NET = True
#
# plug-ins
# ========
#
# math
# ----
# Python standard float/complex types and math/cmath modules only
# provide fixed precision (double precision), but sometimes
# multiprecision floating point is needed.
# If you have mpmath installed, set HAVE_MPMATH True and use:
#PLUGIN_MATH = 'mpmath'
#CHECK_PLUGIN_MATH = False
# Otherwise, use only Python float/complex as default (but use mpmath
# if possible):
PLUGIN_MATH = None
CHECK_PLUGIN_MATH = True
#
# Assumptions
# ===========
#
# Some conjectures are useful for assuring the validity of a fast
# algorithm.
#
# All assumptions default to False, but you can set them to True if
# you believe them.
#
# GRH
# ---
#
# Generalized Riemann Hypothesis.
# For example, a primality test runs in O((log n)**2) if GRH is true,
# while it takes O((log n)**6) or so without it.
#
# If you believe GRH as the truth:
#GRH = True
# The default is, of course, conservatively doubting it:
GRH = False
#
# Files
# =====
#
# data directory
# --------------
#
# The directory where nzmath (static) data files are stored.
# The default will be os.path.join(sys.prefix, 'share', 'nzmath')
# or os.path.join(sys.prefix, 'Data', 'nzmath') on Windows.
#
# If your *nix computer installs NZMATH as a system program:
#DATADIR = '/usr/share/nzmath'
#CHECK_DATADIR = False
# If it is an optional program:
#DATADIR = '/usr/local/share/nzmath'
#CHECK_DATADIR = False
# Windows users may have more aggressive path:
#DATADIR = r'C:\Python25\Data'
#CHECK_DATADIR = False
# The default values mean "I don't know; check it later":
DATADIR = None
CHECK_DATADIR = True
# -------------------
# User Configurations
# -------------------
confdir = os.environ.get('NZMATHCONFDIR', None)
if confdir is None:
if sys.platform in WINDOWS_PLATFORMS:
# "C:\Documents and Settings\%USERNAME%\Application Data\nzmath"
# APPDIR = "C:\Documents and Settings\%USERNAME%\Application Data"
appdir = os.environ.get('APPDATA', None)
# USERPROFILE = "C:\Documents and Settings\%USERNAME%"
profdir = os.environ.get('USERPROFILE', None)
if appdir is not None:
confdir = os.path.join(appdir, 'nzmath')
elif profdir is not None:
confdir = os.path.join(profdir, 'Application Data', 'nzmath')
else:
# "~/.nzmath.d/"
homedir = os.environ.get('HOME', None)
if homedir is not None:
confdir = os.path.join(homedir, '.nzmath.d')
if confdir is None or not os.path.exists(confdir):
warnings.warn("please set NZMATHCONFDIR")
try:
if confdir is not None and os.path.exists(confdir):
sys.path.insert(0, confdir)
# overwrite the default settings with user's nzmathconf
from nzmathconf import *
except ImportError:
warnings.warn("nzmathconf.py not found")
# ------
# Checks
# ------
#
# mpmath
# ------
#
def check_mpmath():
"""
Check if mpmath is importable or not
"""
try:
import mpmath
return True
except ImportError:
return False
if CHECK_MPMATH:
HAVE_MPMATH = check_mpmath()
#
# sqlite3
# -------
#
def check_sqlite3():
"""
Check if sqlite3 is importable or not.
    pysqlite2 may serve as a substitute.
"""
try:
try:
import sqlite3
return True
except ImportError:
import pysqlite2.dbapi2 as sqlite3
return True
except ImportError:
return False
if CHECK_SQLITE3:
HAVE_SQLITE3 = check_sqlite3()
#
# net availability
# ----------------
#
def check_net():
"""
Check the net connection by http call.
"""
import urllib.request, urllib.error, urllib.parse
try:
urllib.request.urlopen('http://sourceforge.net/projects/nzmath/')
return True
except urllib.error.HTTPError:
# the problem is on server side, thus connected anyway
return True
except urllib.error.URLError:
# no dns, thus no connection
return False
except Exception:
        # I don't know the reason, but something is wrong
return False
if CHECK_NET:
HAVE_NET = check_net()
#
# math plug-in
#
def check_plugin_math():
"""
Return 'mpmath' if HAVE_MPMATH, None otherwise.
"""
if HAVE_MPMATH:
return 'mpmath'
else:
return None
if CHECK_PLUGIN_MATH:
PLUGIN_MATH = check_plugin_math()
#
# data directory
# --------------
#
def default_datadir():
candidates = []
if DATADIR is not None:
candidates.append(DATADIR)
if sys.platform in WINDOWS_PLATFORMS:
candidates.append(os.path.join(sys.prefix, 'Data', 'nzmath'))
else:
candidates.append(os.path.join(sys.prefix, 'share', 'nzmath'))
# more possibilities?
for canddir in candidates:
if os.path.exists(canddir):
return canddir
return None
if CHECK_DATADIR:
DATADIR = default_datadir()
if DATADIR is None:
warnings.warn('no datadir found')
# Declare exported variables.
__all__ = ['HAVE_MPMATH', 'HAVE_SQLITE3', 'HAVE_NET', 'PLUGIN_MATH',
'GRH', 'DATADIR']
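# Example of a user's nzmathconf.py placed in NZMATHCONFDIR (hedged sketch; any
# of the settings documented above can be overridden there):
#
#     HAVE_MPMATH = True
#     CHECK_MPMATH = False
#     GRH = True
#     DATADIR = '/usr/local/share/nzmath'
#     CHECK_DATADIR = False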
|
[] |
[] |
[
"APPDATA",
"NZMATHCONFDIR",
"HOME",
"USERPROFILE"
] |
[]
|
["APPDATA", "NZMATHCONFDIR", "HOME", "USERPROFILE"]
|
python
| 4 | 0 | |
tests/scale_test.go
|
// +build scale
/*
* Copyright (C) 2017 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package tests
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/skydive-project/skydive/analyzer"
gclient "github.com/skydive-project/skydive/api/client"
"github.com/skydive-project/skydive/api/types"
"github.com/skydive-project/skydive/common"
"github.com/skydive-project/skydive/config"
"github.com/skydive-project/skydive/flow"
g "github.com/skydive-project/skydive/gremlin"
shttp "github.com/skydive-project/skydive/http"
"github.com/skydive-project/skydive/websocket"
)
func getAnalyzerStatus(client *shttp.CrudClient) (status analyzer.Status, err error) {
resp, err := client.Request("GET", "status", nil, nil)
if err != nil {
return status, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
data, _ := ioutil.ReadAll(resp.Body)
return status, fmt.Errorf("Failed to get status, %s: %s", resp.Status, data)
}
if err := common.JSONDecode(resp.Body, &status); err != nil {
return status, err
}
return
}
func checkAgents(client *shttp.CrudClient, agentsExpected int) error {
status, err := getAnalyzerStatus(client)
if err != nil {
return err
}
if count := len(status.Agents); count != agentsExpected {
return fmt.Errorf("Expected %d agent(s), got %d", agentsExpected, count)
}
return nil
}
func checkHostNodes(client *shttp.CrudClient, gh *gclient.GremlinQueryHelper, nodeExpected int) error {
retry := func() error {
nodes, err := gh.GetNodes(g.G.V().Has("Type", "host"))
if err != nil {
return err
}
if len(nodes) != nodeExpected {
return fmt.Errorf("Should return %d host nodes got : %v", nodeExpected, spew.Sdump(nodes))
}
if err := checkAgents(client, nodeExpected); err != nil {
return err
}
return nil
}
return common.Retry(retry, 10, 5*time.Second)
}
func checkPeers(client *shttp.CrudClient, peersExpected int, state websocket.ConnState) error {
status, err := getAnalyzerStatus(client)
if err != nil {
return err
}
count := 0
for _, peer := range status.Peers.Incomers {
if *peer.State == state {
count++
}
}
for _, peer := range status.Peers.Outgoers {
if *peer.State == state {
count++
}
}
if count != peersExpected {
return fmt.Errorf("Expected %d peers, got %d, status: %+v", peersExpected, count, status)
}
return nil
}
const (
checkLive = iota + 1
checkHisto
checkBoth
)
func _checkICMPv4Flows(gh *gclient.GremlinQueryHelper, nodeSel g.QueryString, flowExpected int, cmp func(seen, exp int) bool, live bool) error {
node, err := gh.GetNode(nodeSel)
if err != nil {
return errors.New("Node node found: agent-1")
}
tid, _ := node.GetFieldString("TID")
prefix := g.G
if !live {
prefix = prefix.At("-0s", 300)
}
gremlin := prefix.Flows().Has("LayersPath", "Ethernet/IPv4/ICMPv4", "NodeTID", tid).Sort()
retry := func() error {
flows, err := gh.GetFlows(gremlin)
if err != nil {
return fmt.Errorf("%s: %s", gremlin, err)
}
if !cmp(len(flows), flowExpected) {
return fmt.Errorf("Should get %d ICMPv4 flow with prefix(%s) got %s", flowExpected, prefix, flowsToString(flows))
}
return nil
}
return common.Retry(retry, 40, time.Second)
}
func checkICMPv4Flows(gh *gclient.GremlinQueryHelper, nodeSel g.QueryString, flowExpected int, cmp func(seen, exp int) bool, mode int) error {
if mode == checkBoth || mode == checkLive {
if err := _checkICMPv4Flows(gh, nodeSel, flowExpected, cmp, true); err != nil {
return err
}
}
if mode == checkBoth || mode == checkHisto {
if err := _checkICMPv4Flows(gh, nodeSel, flowExpected, cmp, false); err != nil {
return err
}
}
return nil
}
func checkIPerfFlows(gh *gclient.GremlinQueryHelper, flowExpected int) error {
retry := func() error {
flows, err := gh.GetFlows(g.G.Flows().Has("LayersPath", "Ethernet/IPv4/TCP").Has("Transport.B", 5001).Sort())
if err != nil {
return err
}
// two capture 2 flows
if len(flows) != flowExpected {
var flowsTCP []*flow.Flow
if flowsTCP, err = gh.GetFlows(g.G.Flows().Has("LayersPath", "Ethernet/IPv4/TCP").Sort()); err != nil {
return err
}
return fmt.Errorf("Should get %d iperf(tcp/5001) flows, got %s", flowExpected, flowsToString(flowsTCP))
}
return nil
}
if err := common.Retry(retry, 20, time.Second); err != nil {
return err
}
// check in the storage
retry = func() error {
flows, err := gh.GetFlows(g.G.At("-1s", 300).Flows().Has("LayersPath", "Ethernet/IPv4/TCP").Has("Transport.B", 5001).Sort())
if err != nil {
return err
}
if len(flows) != flowExpected {
var flowsTCP []*flow.Flow
if flowsTCP, err = gh.GetFlows(g.G.At("-1s", 300).Flows().Has("LayersPath", "Ethernet/IPv4/TCP").Sort()); err != nil {
return err
}
return fmt.Errorf("Should get %d iperf(tcp/5001) flow from datastore got %s", flowExpected, flowsToString(flowsTCP))
}
maps, err := gh.GetSockets(g.G.At("-1s", 300).Flows().Has("LayersPath", "Ethernet/IPv4/TCP").Sockets())
if err != nil {
return err
}
if len(maps) != len(flows) {
return fmt.Errorf("Should get as many sockets as flows in datastore, %d != %d", len(maps), len(flows))
}
for _, sockets := range maps {
for _, socket := range sockets {
if socket == nil || socket.ProcessInfo.Process != "/usr/bin/iperf" {
return fmt.Errorf("Should get iperf exe as socket info %v", socket)
}
if socket.Name != "iperf" || socket.ProcessInfo.Name != "iperf" {
return fmt.Errorf("Should get iperf thread name %v", socket)
}
}
}
return nil
}
if err := common.Retry(retry, 40, time.Second); err != nil {
return err
}
return nil
}
func checkCaptures(gh *gclient.GremlinQueryHelper, captureExpected int) error {
retry := func() error {
nodes, err := gh.GetNodes(g.G.V().Has("Captures.State", "active"))
if err != nil {
return err
}
if len(nodes) != captureExpected {
return fmt.Errorf("Should return %d capture got : %s", captureExpected, spew.Sdump(nodes))
}
return nil
}
return common.Retry(retry, 20, time.Second)
}
func waitForFirstFlows(gh *gclient.GremlinQueryHelper, expected int) error {
retry := func() error {
flows, err := gh.GetFlows(g.G.Flows().Has("LayersPath", "Ethernet/IPv4/ICMPv4").Sort())
if err != nil {
return err
}
if len(flows) != expected {
return fmt.Errorf("Should get at least one flow, got %s", spew.Sdump(flows))
}
return nil
}
return common.Retry(retry, 10, time.Second)
}
func genICMPv4(t *testing.T, scale, src string, dst string, count int) error {
// generate some packet and wait for seeing them, to be sure that the capture is started
var seen int
pingFnc := func() error {
setupCmds := []Cmd{
{fmt.Sprintf("%s ping %s %s -c 1", scale, src, dst), false},
}
if _, err := execCmds(t, setupCmds...); err == nil {
seen++
if seen == count {
return nil
}
}
return errors.New("Quota not reached yet")
}
return common.Retry(pingFnc, 2*count, time.Second)
}
func TestScaleHA(t *testing.T) {
gopath := os.Getenv("GOPATH")
scale := gopath + "/src/github.com/skydive-project/skydive/scripts/scale.sh"
setupCmds := []Cmd{
{fmt.Sprintf("%s start 2 2 2", scale), true},
{"sleep 30", false},
}
execCmds(t, setupCmds...)
tearDownCmds := []Cmd{
{fmt.Sprintf("%s stop 2 4 2", scale), false},
}
defer execCmds(t, tearDownCmds...)
// Load Agent-1 as default config for our client
config.InitConfig("file", []string{"/tmp/skydive-scale/agent-1.yml"})
authOptions := &shttp.AuthenticationOpts{Username: "admin", Password: "password"}
client, err := gclient.NewCrudClientFromConfig(authOptions)
if err != nil {
t.Fatalf("Failed to create client: %s", err)
}
// switch to the other analyzer
os.Setenv("SKYDIVE_ANALYZERS", "localhost:8084")
gh := gclient.NewGremlinQueryHelper(authOptions)
// expected 1 either Incomer or Outgoer
if err = common.Retry(func() error { return checkPeers(client, 1, websocket.ConnState(common.RunningState)) }, 5, time.Second); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// test if we have our 2 hosts
if err = checkHostNodes(client, gh, 2); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// start a capture
capture := types.NewCapture(g.G.V().Has("Type", "netns", "Name", "vm1").Out().Has("Name", "eth0").String(), "")
capture.Type = "pcap"
if err = client.Create("capture", capture, nil); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// check that we have 2 captures, one per vm1
if err = checkCaptures(gh, 2); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// generate some icmpv4
if err = genICMPv4(t, scale, "agent-1-vm1", "agent-2-vm1", 30); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// 30 flows
node1 := g.G.V().Has("Name", "agent-1").Out().Has("Type", "netns", "Name", "vm1").Out().Has("Name", "eth0")
if err = checkICMPv4Flows(gh, node1, 30, func(seen, exp int) bool { return seen == exp }, checkBoth); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
node2 := g.G.V().Has("Name", "agent-2").Out().Has("Type", "netns", "Name", "vm1").Out().Has("Name", "eth0")
if err = checkICMPv4Flows(gh, node2, 30, func(seen, exp int) bool { return seen == exp }, checkBoth); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// increase the agent number
setupCmds = []Cmd{
{fmt.Sprintf("%s start 2 4 2", scale), false},
}
execCmds(t, setupCmds...)
// test if we have now 4 hosts
if err = checkHostNodes(client, gh, 4); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// check that we have 4 captures, one per vm1
if err = checkCaptures(gh, 4); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// kill the last agent
setupCmds = []Cmd{
{fmt.Sprintf("%s stop-agent 4", scale), false},
}
execCmds(t, setupCmds...)
// test if we have now 3 hosts
if err = checkHostNodes(client, gh, 3); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// switch back to the first analyzer
os.Setenv("SKYDIVE_ANALYZERS", "localhost:8082")
client, err = gclient.NewCrudClientFromConfig(authOptions)
if err != nil {
t.Fatalf("Failed to create client: %s", err)
}
// test if we have still 3 hosts
if err = checkHostNodes(client, gh, 3); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// destroy the second analyzer
setupCmds = []Cmd{
{fmt.Sprintf("%s stop-analyzer 2", scale), false},
{"sleep 5", false},
}
execCmds(t, setupCmds...)
if err = checkPeers(client, 0, websocket.ConnState(common.RunningState)); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// test if the remaining analyzer have a correct graph
if err = checkHostNodes(client, gh, 3); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// generate more icmp traffic
if err = genICMPv4(t, scale, "agent-3-vm1", "agent-1-vm1", 30); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
node3 := g.G.V().Has("Name", "agent-3").Out().Has("Type", "netns", "Name", "vm1").Out().Has("Name", "eth0")
if err = checkICMPv4Flows(gh, node3, 30, func(seen, exp int) bool { return seen == exp }, checkBoth); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// iperf test 10 sec, 1Mbits/s
setupCmds = []Cmd{
{fmt.Sprintf("%s iperf agent-3-vm1 agent-1-vm1", scale), false},
}
execCmds(t, setupCmds...)
if err = checkIPerfFlows(gh, 2); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// delete the capture to check that all captures will be delete at the agent side
client.Delete("capture", capture.ID())
if err = checkCaptures(gh, 0); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// restore the second analyzer
setupCmds = []Cmd{
{fmt.Sprintf("%s start 2 3 2", scale), false},
{"sleep 5", false},
}
execCmds(t, setupCmds...)
if err = common.Retry(func() error {
return checkPeers(client, 1, websocket.ConnState(common.RunningState))
}, 15, time.Second); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// delete an agent
setupCmds = []Cmd{
{fmt.Sprintf("%s stop-agent 1", scale), false},
}
execCmds(t, setupCmds...)
// test if we have now 2 hosts
if err = checkHostNodes(client, gh, 2); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// restart the agent 1 to check that flows are still forwarded to analyzer
setupCmds = []Cmd{
{fmt.Sprintf("%s start 2 3 2", scale), false},
{"sleep 5", false},
}
execCmds(t, setupCmds...)
	// test if we have now 3 hosts
if err = checkHostNodes(client, gh, 3); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// restart a capture on all eth0
capture = types.NewCapture(g.G.V().Has("Type", "netns", "Name", "vm1").Out().Has("Name", "eth0").String(), "")
capture.Type = "pcap"
if err = client.Create("capture", capture, nil); err != nil {
t.Fatal(err)
}
// check that we have 3 captures, one per vm1
if err = checkCaptures(gh, 3); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
if err = genICMPv4(t, scale, "agent-1-vm1", "agent-2-vm1", 30); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// check that we have 30 flow in live as the oldest has been deleted by agent stop
if err = checkICMPv4Flows(gh, node1, 30, func(seen, exp int) bool { return seen == exp }, checkLive); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
// check that we have > 30 flow in histo the ones before stop and the ones just generated
if err = checkICMPv4Flows(gh, node1, 40, func(seen, exp int) bool { return seen >= exp }, checkHisto); err != nil {
execCmds(t, tearDownCmds...)
t.Fatal(err)
}
}
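// exampleTopologyCheck is a hedged sketch (not part of the original suite) showing
// how the smaller helpers above are typically combined outside the large
// TestScaleHA scenario; the expected counts are hypothetical.
func exampleTopologyCheck(client *shttp.CrudClient, gh *gclient.GremlinQueryHelper) error {
	// verify that the analyzer sees the expected number of agents/host nodes ...
	if err := checkHostNodes(client, gh, 2); err != nil {
		return err
	}
	// ... and that no stale captures remain
	return checkCaptures(gh, 0)
}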
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
pkg/kubectl/cmd/plugin/plugin.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/kubernetes/pkg/kubectl/util/templates"
)
var (
pluginLong = templates.LongDesc(`
Provides utilities for interacting with plugins.
Plugins provide extended functionality that is not part of the major command-line distribution.
		Please refer to the documentation and examples for more information about how to write your own plugins.`)
pluginListLong = templates.LongDesc(`
List all available plugin files on a user's PATH.
Available plugin files are those that are:
- executable
- anywhere on the user's PATH
- begin with "kubectl-"
`)
)
func NewCmdPlugin(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
cmd := &cobra.Command{
Use: "plugin [flags]",
DisableFlagsInUseLine: true,
Short: i18n.T("Provides utilities for interacting with plugins."),
Long: pluginLong,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.DefaultSubCommandRun(streams.ErrOut)(cmd, args)
},
}
cmd.AddCommand(NewCmdPluginList(f, streams))
return cmd
}
type PluginListOptions struct {
Verifier PathVerifier
NameOnly bool
PluginPaths []string
genericclioptions.IOStreams
}
// NewCmdPluginList provides a way to list all plugin executables visible to kubectl
func NewCmdPluginList(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
o := &PluginListOptions{
IOStreams: streams,
}
cmd := &cobra.Command{
Use: "list",
Short: "list all visible plugin executables on a user's PATH",
Long: pluginListLong,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(cmd))
cmdutil.CheckErr(o.Run())
},
}
cmd.Flags().BoolVar(&o.NameOnly, "name-only", o.NameOnly, "If true, display only the binary name of each plugin, rather than its full path")
return cmd
}
func (o *PluginListOptions) Complete(cmd *cobra.Command) error {
o.Verifier = &CommandOverrideVerifier{
root: cmd.Root(),
seenPlugins: make(map[string]string, 0),
}
o.PluginPaths = filepath.SplitList(os.Getenv("PATH"))
return nil
}
func (o *PluginListOptions) Run() error {
pluginsFound := false
isFirstFile := true
pluginErrors := []error{}
pluginWarnings := 0
for _, dir := range uniquePathsList(o.PluginPaths) {
files, err := ioutil.ReadDir(dir)
if err != nil {
pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to read directory %q in your PATH: %v", dir, err))
continue
}
for _, f := range files {
if f.IsDir() {
continue
}
if !strings.HasPrefix(f.Name(), "kubectl-") {
continue
}
if isFirstFile {
fmt.Fprintf(o.ErrOut, "The following kubectl-compatible plugins are available:\n\n")
pluginsFound = true
isFirstFile = false
}
pluginPath := f.Name()
if !o.NameOnly {
pluginPath = filepath.Join(dir, pluginPath)
}
fmt.Fprintf(o.Out, "%s\n", pluginPath)
if errs := o.Verifier.Verify(filepath.Join(dir, f.Name())); len(errs) != 0 {
for _, err := range errs {
fmt.Fprintf(o.ErrOut, " - %s\n", err)
pluginWarnings++
}
}
}
}
if !pluginsFound {
pluginErrors = append(pluginErrors, fmt.Errorf("error: unable to find any kubectl plugins in your PATH"))
}
if pluginWarnings > 0 {
if pluginWarnings == 1 {
pluginErrors = append(pluginErrors, fmt.Errorf("error: one plugin warning was found"))
} else {
pluginErrors = append(pluginErrors, fmt.Errorf("error: %v plugin warnings were found", pluginWarnings))
}
}
if len(pluginErrors) > 0 {
fmt.Fprintln(o.ErrOut)
errs := bytes.NewBuffer(nil)
for _, e := range pluginErrors {
fmt.Fprintln(errs, e)
}
return fmt.Errorf("%s", errs.String())
}
return nil
}
// PathVerifier receives a path and determines if it is valid or not
type PathVerifier interface {
// Verify determines if a given path is valid
Verify(path string) []error
}
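// CommandOverrideVerifier warns when a plugin path is not executable or collides with an existing
// command or a previously seen plugin.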
type CommandOverrideVerifier struct {
root *cobra.Command
seenPlugins map[string]string
}
// Verify implements PathVerifier and determines if a given path
// is valid depending on whether or not it overwrites an existing
// kubectl command path, or a previously seen plugin.
func (v *CommandOverrideVerifier) Verify(path string) []error {
if v.root == nil {
return []error{fmt.Errorf("unable to verify path with nil root")}
}
// extract the plugin binary name
segs := strings.Split(path, "/")
binName := segs[len(segs)-1]
cmdPath := strings.Split(binName, "-")
if len(cmdPath) > 1 {
// the first argument is always "kubectl" for a plugin binary
cmdPath = cmdPath[1:]
}
errors := []error{}
if isExec, err := isExecutable(path); err == nil && !isExec {
errors = append(errors, fmt.Errorf("warning: %s identified as a kubectl plugin, but it is not executable", path))
} else if err != nil {
errors = append(errors, fmt.Errorf("error: unable to identify %s as an executable file: %v", path, err))
}
if existingPath, ok := v.seenPlugins[binName]; ok {
errors = append(errors, fmt.Errorf("warning: %s is overshadowed by a similarly named plugin: %s", path, existingPath))
} else {
v.seenPlugins[binName] = path
}
if cmd, _, err := v.root.Find(cmdPath); err == nil {
errors = append(errors, fmt.Errorf("warning: %s overwrites existing command: %q", binName, cmd.CommandPath()))
}
return errors
}
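// isExecutable reports whether the file at fullPath can be executed: on Windows this is decided by
// the file extension, on other platforms by the mode bits.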
func isExecutable(fullPath string) (bool, error) {
info, err := os.Stat(fullPath)
if err != nil {
return false, err
}
if runtime.GOOS == "windows" {
fileExt := strings.ToLower(filepath.Ext(fullPath))
switch fileExt {
case ".bat", ".cmd", ".com", ".exe":
return true, nil
}
return false, nil
}
if m := info.Mode(); !m.IsDir() && m&0111 != 0 {
return true, nil
}
return false, nil
}
// uniquePathsList deduplicates a given slice of strings without
// sorting or otherwise altering its order in any way.
func uniquePathsList(paths []string) []string {
seen := map[string]bool{}
newPaths := []string{}
for _, p := range paths {
if seen[p] {
continue
}
seen[p] = true
newPaths = append(newPaths, p)
}
return newPaths
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
commands/mmctltestlib.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package commands
import (
"fmt"
"os"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mmctl/client"
)
const (
InstanceURL = "http://localhost:8065"
SysadminUsername = "sysadmin"
SysadminPass = "Sys@dmin-sample1"
UserUsername = "user-1"
UserPass = "SampleUs@r-1"
)
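// TestHelper bundles the system admin and basic clients plus their user records for integration tests.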
type TestHelper struct {
Client client.Client
SystemAdminClient client.Client
BasicUser *model.User
SystemAdminUser *model.User
}
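// setupTestHelper logs in both the system admin and the basic user against the configured instance
// (MMCTL_INSTANCE_URL overrides the default URL) and returns a helper holding both clients and users.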
func setupTestHelper() (*TestHelper, error) {
instanceURL := InstanceURL
if os.Getenv("MMCTL_INSTANCE_URL") != "" {
instanceURL = os.Getenv("MMCTL_INSTANCE_URL")
}
sysadminClient, _, err := InitClientWithUsernameAndPassword(SysadminUsername, SysadminPass, instanceURL, false)
if err != nil {
return nil, fmt.Errorf("system admin client failed to connect: %s", err)
}
sysadminUser, response := sysadminClient.GetUserByUsername(SysadminUsername, "")
if response.Error != nil {
return nil, fmt.Errorf("couldn't retrieve system admin user with username %s: %s", SysadminUsername, response.Error)
}
client, _, err := InitClientWithUsernameAndPassword(UserUsername, UserPass, instanceURL, false)
if err != nil {
return nil, fmt.Errorf("basic client failed to connect: %s", err)
}
basicUser, response := client.GetUserByUsername(UserUsername, "")
if response.Error != nil {
return nil, fmt.Errorf("couldn't retrieve basic user with username %s: %s", UserUsername, response.Error)
}
th := &TestHelper{
Client: client,
SystemAdminClient: sysadminClient,
BasicUser: basicUser,
SystemAdminUser: sysadminUser,
}
return th, nil
}
| ["\"MMCTL_INSTANCE_URL\"", "\"MMCTL_INSTANCE_URL\""] | [] | ["MMCTL_INSTANCE_URL"] | [] | ["MMCTL_INSTANCE_URL"] | go | 1 | 0 | |
filter/filter.py
|
import os, sys
import pika
import json
import time
import traceback
from peewee import *
from datetime import datetime
MQTT_HOST = os.environ.get('MQTT_HOST')
MQTT_USER = os.environ.get('MQTT_USER')
MQTT_PASSWORD = os.environ.get('MQTT_PASSWORD')
DB_HOST = os.environ.get('DB_HOST')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
DB_USER = os.environ.get('DB_USER')
DB_NAME = os.environ.get('DB_NAME')
RECORD_TIMEOUT = int(os.environ.get('RECORD_TIMEOUT'))  # record expiry window, in seconds
import signal
def handler(signum, frame):
sys.exit(1)
signal.signal(signal.SIGTERM, handler)
"""
PSQL ORM courtesy of PeeWee
No need for schema.sql since PeeWee can take care of this for us!
"""
psql_db = PostgresqlDatabase(DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
class BaseModel(Model):
class Meta:
database = psql_db
class Record_LinkedIn(BaseModel):
url = CharField(primary_key=True)
# pass the callable (no parentheses) so each row gets its own timestamp
last_accessed = DateTimeField(default=datetime.utcnow)
class Record_Fb(BaseModel):
fb_id = CharField(primary_key=True)
last_accessed = DateTimeField(default=datetime.utcnow)
class Record_Fsquare(BaseModel):
fsquare_id = CharField(primary_key=True)
last_accessed = DateTimeField(default=datetime.utcnow)
class Record_Google(BaseModel):
google_id = CharField(primary_key=True)
last_accessed = DateTimeField(default=datetime.utcnow)
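# Keep retrying until Postgres is reachable, then create any tables that do not exist yet.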
while True:
try:
psql_db.connect()
break
except Exception:
time.sleep(5)
if not Record_LinkedIn.table_exists():
Record_LinkedIn.create_table()
if not Record_Fb.table_exists():
Record_Fb.create_table()
if not Record_Fsquare.table_exists():
Record_Fsquare.create_table()
if not Record_Google.table_exists():
Record_Google.create_table()
"""
RabbitMQ support courtesy of Pika
"""
while True:
try:
_credentials = pika.PlainCredentials(MQTT_USER, MQTT_PASSWORD)
mqtt_connection = pika.BlockingConnection(pika.ConnectionParameters(host=MQTT_HOST, credentials=_credentials))
break
except Exception:
time.sleep(5)
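# Declare the priority-enabled 'filter' work queue, an anonymous queue bound to the 'admin' fanout
# exchange, and the durable 'fetch' queue that downstream fetchers consume.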
pqdata = dict()
pqdata['x-max-priority'] = 5
ingress_channel = mqtt_connection.channel()
ingress_channel.exchange_declare(exchange='admin', type='fanout')
ingress_channel.queue_declare(queue='filter', durable=True, arguments=pqdata)
admin_queue = ingress_channel.queue_declare(arguments=pqdata)
ingress_channel.queue_bind(exchange="admin", queue=admin_queue.method.queue)
egress_channel = mqtt_connection.channel()
egress_channel.queue_declare(queue='fetch', durable=True, arguments=pqdata)
"""
Selectors
"""
def retrieve_Fb(facebook_id):
return Record_Fb.select().where(Record_Fb.fb_id == facebook_id).get()
def seen_fb(facebook_id):
try:
retrieve_Fb(facebook_id)
return True
except Exception:
return False
def retrieve_LinkedIn(website):
return Record_LinkedIn.select().where(Record_LinkedIn.url == website).get()
def seen_website(website):
"""
TODO: test this!
"""
try:
retrieve_LinkedIn(website)
return True
except Exception:
return False
def retrieve_Fsquare(foursquare_id):
return Record_Fsquare.select().where(Record_Fsquare.fsquare_id == foursquare_id).get()
def seen_fsquare(foursquare_id):
try:
retrieve_Fsquare(foursquare_id)
return True
except Exception:
return False
def retrieve_Google(google_id):
return Record_Google.select().where(Record_Google.google_id == google_id).get()
def seen_google(google_id):
try:
retrieve_Google(google_id)
return True
except Exception:
return False
"""
Message Handling
"""
def seen_fb_time_ago(lead):
if (datetime.utcnow() - retrieve_Fb(lead).last_accessed).total_seconds() > RECORD_TIMEOUT:
return True
return False
def seen_linkedin_time_ago(lead):
if (datetime.utcnow() - retrieve_LinkedIn(lead).last_accessed).total_seconds() > RECORD_TIMEOUT:
return True
return False
def seen_fsquare_time_ago(lead):
if (datetime.utcnow() - retrieve_Fsquare(lead).last_accessed).total_seconds() > RECORD_TIMEOUT:
return True
return False
def seen_google_time_ago(lead):
if (datetime.utcnow() - retrieve_Google(lead).last_accessed).total_seconds() > RECORD_TIMEOUT:
return True
return False
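# callback handles two message shapes: deletion requests ("delete" + "resource_locator" + "protocol"),
# which remove the matching record, and lead batches ("potential_leads" + "protocol" + "depth"), where
# each unseen or stale lead is recorded/updated and then forwarded to the 'fetch' queue.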
def callback(ch, method, properties, body):
try:
raw_data = json.loads(body)
if ("potential_leads" not in raw_data or "protocol" not in raw_data or "depth" not in raw_data):
if "delete" in raw_data and "resource_locator" in raw_data and "protocol" in raw_data:
if raw_data["protocol"] == "fb":
"""
sys.stderr.write("Deleted: " + str(raw_data["resource_locator"]) + "\n")
sys.stderr.flush()
"""
if seen_fb(raw_data["resource_locator"]):
retrieve_Fb(raw_data["resource_locator"]).delete_instance()
return
if raw_data["protocol"] == "linkedin":
if seen_website(raw_data["resource_locator"]):
retrieve_LinkedIn(raw_data["resource_locator"]).delete_instance()
return
if raw_data["protocol"] == "fsquare":
if seen_fsquare(raw_data["resource_locator"]):
retrieve_Fsquare(raw_data["resource_locator"]).delete_instance()
return
if raw_data["protocol"] == "google":
if seen_google(raw_data["resource_locator"]):
retrieve_Google(raw_data["resource_locator"]).delete_instance()
return
raise Exception("Unknown protocol requested during deletion")
else:
raise Exception("Body malformed")
potential_leads = raw_data["potential_leads"]
protocol = raw_data["protocol"]
for lead in potential_leads:
try:
if protocol == "fb":
if not seen_fb(lead):
newRecord = Record_Fb(fb_id=lead, last_accessed = datetime.utcnow())
newRecord.save(force_insert=True)
"""
TODO: Handle elif difference
"""
elif seen_fb_time_ago(lead):
Record_Fb.update(last_accessed = datetime.utcnow()).where(Record_Fb.fb_id == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
#return
continue
if protocol == "linkedin":
if not seen_website(lead):
newRecord = Record_LinkedIn(url=lead, last_accessed = datetime.utcnow())
newRecord.save(force_insert=True)
"""
TODO: Handle elif difference
"""
elif seen_linkedin_time_ago(lead):
Record_LinkedIn.update(last_accessed = datetime.utcnow()).where(Record_LinkedIn.url == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
continue
if protocol == "fsquare":
if not seen_fsquare(lead):
newRecord = Record_Fsquare(fsquare_id=lead, last_accessed= datetime.utcnow())
newRecord.save(force_insert=True)
elif seen_fsquare_time_ago(lead):
Record_Fsquare.update(last_accessed = datetime.utcnow()).where(Record_Fsquare.fsquare_id == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
continue
#return
if protocol == "google":
if not seen_google(lead):
newRecord = Record_Google(google_id=lead, last_accessed= datetime.utcnow())
newRecord.save(force_insert=True)
elif seen_google_time_ago(lead):
Record_Google.update(last_accessed=datetime.utcnow()).where(Record_Google.google_id == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
# we go on to the next lead if we see a familiar lead
# and if that familiar lead is not due for an update
continue
except Exception as e:
try:
sys.stderr.write("Attempting to rollback db: \n" + str(e) + "\n")
psql_db.rollback()
except Exception as e:
sys.stderr.write("DB connection is messed up: \n" + str(e) + "\n")
psql_db.close()
psql_db.connect()
fetch_data = {"protocol": raw_data["protocol"], "resource_locator": lead, "depth": raw_data["depth"]}
egress_channel.basic_publish(
exchange='',
routing_key='fetch',
body=json.dumps(fetch_data),
properties=pika.BasicProperties(
delivery_mode = 1,
priority = int(raw_data["priority"]) if "priority" in raw_data else 0 # default priority
)
)
except Exception as e:
sys.stderr.write(str(e) + "Unable to filter: \n" + body + "\n")
traceback.print_exc()
try:
psql_db.rollback()
except:
psql_db.close()
psql_db.connect()
sys.stderr.flush()
finally:
ingress_channel.basic_ack(delivery_tag = method.delivery_tag)
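# admin_callback currently only parses and acknowledges admin broadcasts; no action is taken on them.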
def admin_callback(ch, method, properties, body):
try:
data = json.loads(body)
return
except Exception as e:
sys.stderr.write(str(e) + "Unable to fetch: \n" + body + "\n")
traceback.print_exc()
sys.stderr.flush()
finally:
ingress_channel.basic_ack(delivery_tag = method.delivery_tag)
ingress_channel.basic_qos(prefetch_count=1)
ingress_channel.basic_consume(callback, queue='filter')
ingress_channel.basic_consume(admin_callback, queue=admin_queue.method.queue)
ingress_channel.start_consuming()
| [] | [] | ["DB_PASSWORD", "MQTT_PASSWORD", "DB_HOST", "MQTT_USER", "DB_NAME", "RECORD_TIMEOUT", "MQTT_HOST", "DB_USER"] | [] | ["DB_PASSWORD", "MQTT_PASSWORD", "DB_HOST", "MQTT_USER", "DB_NAME", "RECORD_TIMEOUT", "MQTT_HOST", "DB_USER"] | python | 8 | 0 |