filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀) | variableargcount (float64, 0–0, ⌀) | sentence (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
src/main/java/tk/wosaj/lambda/database/util/SessionFactoryGenerator.java
|
package tk.wosaj.lambda.database.util;
import org.hibernate.SessionFactory;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cfg.Configuration;
import tk.wosaj.lambda.database.guild.GuildItem;
import javax.annotation.Nonnull;
import java.util.Properties;
public final class SessionFactoryGenerator {
private SessionFactoryGenerator() {}
private static SessionFactory sessionFactory;
@Nonnull
public static SessionFactory getSessionFactory() {
if (sessionFactory == null) {
Properties properties = new Properties();
properties.put("java.util.logging.ConsoleHandler.level", "FATAL");
properties.put("hibernate.connection.driver_class", "org.postgresql.Driver");
properties.put("hibernate.connection.url", System.getenv("JDBC_DATABASE_URL"));
properties.put("hibernate.dialect", "org.hibernate.dialect.PostgreSQL10Dialect");
properties.put("show_sql", "false");
try {
Configuration configuration = new Configuration()
.addProperties(properties).addAnnotatedClass(GuildItem.class);
StandardServiceRegistryBuilder builder =
new StandardServiceRegistryBuilder().applySettings(configuration.getProperties());
sessionFactory = configuration.buildSessionFactory(builder.build());
} catch (Exception e) {
e.printStackTrace();
}
}
return sessionFactory;
}
}
|
["\"JDBC_DATABASE_URL\""] | [] | ["JDBC_DATABASE_URL"] | [] | ["JDBC_DATABASE_URL"] | java | 1 | 0 | |
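For the Java and Go rows in this table, the `environment` column stores each `getenv`-style argument exactly as it is written in the source (quotes included), while `constarg` and `constargjson` hold the bare variable names and `constargcount` counts the distinct names. A minimal sketch of how those representations line up, assuming the row above is available as a Python dict with these column names:

```python
import json

# The SessionFactoryGenerator row above, reduced to the columns discussed here.
row = {
    "environment": ['"JDBC_DATABASE_URL"'],   # literal as written in the source, quotes kept
    "constarg": ["JDBC_DATABASE_URL"],        # bare variable name
    "constargjson": '["JDBC_DATABASE_URL"]',  # same names, serialized as JSON
    "constargcount": 1.0,
}

bare = [lit.strip('"') for lit in row["environment"]]
assert bare == row["constarg"] == json.loads(row["constargjson"])
assert row["constargcount"] == len(set(row["constarg"]))
```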
src/picalc.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"strconv"
)
// Demo Take 2
// Calculate pi using Gregory-Leibniz series: (4/1) - (4/3) + (4/5) - (4/7) + (4/9) - (4/11) + (4/13) - (4/15) ...
func calculatePi(iterations int) float64 {
var result float64 = 0.0
var sign float64 = 1.0
var denominator float64 = 1.0
for i := 0; i < iterations; i++ {
result = result + (sign * 4/denominator)
denominator = denominator + 2
sign = -sign
}
return result
}
func handler(w http.ResponseWriter, r *http.Request) {
log.Print("Pi calculator received a request.")
iterations, err := strconv.Atoi(r.URL.Query()["iterations"][0])
if err != nil {
fmt.Fprintf(w, "iterations parameter not valid\n")
return
}
fmt.Fprintf(w, "%.10f\n", calculatePi(iterations))
}
func main() {
log.Print("Pi calculator started.")
http.HandleFunc("/", handler)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
|
["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
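The handler computes π with the Gregory-Leibniz series noted in the comment above; the partial sums oscillate around π (4, 2.6667, 3.4667, 2.8952, ...) and converge slowly. A short Python transcription of `calculatePi`, included only to make that behaviour easy to check:

```python
# Python transcription of calculatePi from picalc.go: sum of (-1)^i * 4 / (2i + 1).
def calculate_pi(iterations: int) -> float:
    result, sign, denominator = 0.0, 1.0, 1.0
    for _ in range(iterations):
        result += sign * 4 / denominator
        denominator += 2
        sign = -sign
    return result

for n in (1, 2, 3, 4, 1000, 100000):
    print(n, calculate_pi(n))  # 4.0, 2.6667, 3.4667, 2.8952, ... -> approaches 3.14159
```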
onsetwikibot.py
|
from mwclient import Site
import html2markdown
import re
import discord
import os
import functools
try:
from dotenv import load_dotenv
load_dotenv()
except ImportError:
pass
VERSION = '1.0.1'
command = os.getenv('DISCORD_BOT_COMMAND')
if command == None or len(command) == 0:
command = '$wiki'
def tomd(html):
text = re.sub(r'<!--[\s\S]*-->', '', html)
text = re.sub(r'<div.*>', '', text, 1)[:-6]
text = re.sub(r'(<span( [a-z]*="[A-Za-z0-9\-_ ]*")*>)|(</span>)', '', text)
text = re.sub(r'(<div( [a-z]*="[A-Za-z0-9\-_ ]*")*>)|(</div>)', '', text)
text = re.sub(r'<pre>', '```lua\n', text)
text = re.sub(r'</pre>', '```', text)
text = re.sub(r'<table.*>[\s\S]*</table>', '', text)
text = html2markdown.convert(text)
text = re.sub(r'(<a( [a-z]*="[A-Za-z0-9\-_ ]*")*>)|(</a>)', '', text)
text = re.sub(r'\n[ \t]*\* ', '\n', text)
return text
def todiscord(text):
def replaceLinks(source):
link = source.group(2).split(' ')[0]
if not link.startswith('http'):
link = 'https://dev.playonset.com' + link
return source.group(1) + ' (' + link + ')'
text = re.sub(r'\[([A-Za-z0-9\-_ ]*)\]\(([A-Za-z0-9\-_ /:"]*)\)', replaceLinks, text)
def replaceHeaders(source):
return '**' + source.group(2) + '**'
text = re.sub(r'([#]+ )(.*)\n', replaceHeaders, text)
return text
onsetwiki = Site('dev.playonset.com', path='/')
print('Fetching wiki pages...')
wikipages = []
for p in onsetwiki.allpages():
wikipages.append(p)
print(str(len(wikipages)) + ' pages fetched!')
def searchwiki(search):
pages = []
search = search.replace('*', '.*')
for p in wikipages:
if re.search(search, p.page_title):
pages.append(p)
return pages
client = discord.Client()
def splittext(source):
    texts = []
    current = []
    for ln in source.split('\n'):
        if len(ln) + sum(len(c) for c in current) + len(current) - 1 > 1024:
            texts.append('\n'.join(current))
            current = []
        current.append(ln)
    if current:
        texts.append('\n'.join(current))
    return texts
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content == command:
await message.channel.send(embed = discord.Embed(title = 'Onset Wiki Bot', url = 'https://github.com/JanHolger/onsetwikibot', description = 'v' + VERSION + ' by <@185356094191697921>')
.add_field(name = 'Usage', value = 'Use `' + command + ' <search>` to search the wiki!')
.add_field(name = 'Wiki', value = 'https://dev.playonset.com/wiki/')
)
elif message.content.startswith(command + ' '):
        query = message.content[len(command) + 1:]
results = searchwiki(query)
if len(results) == 0:
await message.channel.send(embed = discord.Embed(colour = discord.Colour.red(), title = 'No results found!'))
elif len(results) == 1:
embed = discord.Embed(title = results[0].page_title, url = 'https://dev.playonset.com/wiki/' + results[0].name, colour = discord.Colour.green())
i = 0
for text in splittext(todiscord(tomd(onsetwiki.get('parse', pageid=results[0].pageid, disableeditsection=True, disabletoc=True)['parse']['text']['*']))):
embed.add_field(
name = 'Docs' if i == 0 else '...',
value = text,
inline = False
)
i = i + 1
await message.channel.send(embed = embed)
else:
embed = discord.Embed(title = 'Results for "' + query + '"', url = 'https://dev.playonset.com/index.php?search=' + query, colour = discord.Colour.blue())
for r in results:
if(len(embed.fields) == 20):
await message.channel.send(embed = embed)
embed = discord.Embed(colour = discord.Colour.blue())
embed.add_field(name = r.page_title, value = 'https://dev.playonset.com/wiki/' + r.name)
await message.channel.send(embed = embed)
if os.getenv('DISCORD_BOT_TOKEN') == None or len(os.getenv('DISCORD_BOT_TOKEN')) == 0:
print('Environment variable "DISCORD_BOT_TOKEN" not set! Either set it or create a .env file.')
exit(1)
print("Starting Bot")
client.run(os.getenv('DISCORD_BOT_TOKEN'))
|
[] | [] | ["DISCORD_BOT_COMMAND", "DISCORD_BOT_TOKEN"] | [] | ["DISCORD_BOT_COMMAND", "DISCORD_BOT_TOKEN"] | python | 2 | 0 | |
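The bot above treats its two variables differently: `DISCORD_BOT_COMMAND` falls back to `$wiki` when unset or empty, while a missing `DISCORD_BOT_TOKEN` aborts startup. A minimal sketch of that optional-versus-required pattern on its own; the helper names are illustrative, not part of the bot:

```python
import os
import sys

def optional_env(name: str, default: str) -> str:
    # Fall back to a default when the variable is unset or empty.
    return os.getenv(name) or default

def required_env(name: str) -> str:
    # Abort early with a clear message when a mandatory variable is missing.
    value = os.getenv(name)
    if not value:
        sys.exit(f'Environment variable "{name}" not set! Either set it or create a .env file.')
    return value

command = optional_env("DISCORD_BOT_COMMAND", "$wiki")
token = required_env("DISCORD_BOT_TOKEN")
```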
monkey/asgi.py
|
"""
ASGI config for monkey project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'monkey.settings')
application = get_asgi_application()
|
[] | [] | [] | [] | [] | python | 0 | 0 | |
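This row reports zero extracted variables even though the file touches `os.environ`: `os.environ.setdefault` writes a default value rather than reading one, so it evidently does not register as a constant `getenv` argument. A small sketch of the read/write distinction, reusing the variable name from the file:

```python
import os

# Write side: only sets DJANGO_SETTINGS_MODULE if it is not already defined.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monkey.settings")

# Read side: this is the kind of call the constarg/constargcount columns record.
print(os.getenv("DJANGO_SETTINGS_MODULE"))  # "monkey.settings" unless set earlier
```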
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# PyASN1 documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 27 23:15:54 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'ASN.1 types and codecs'
copyright = u'2005-2020, Ilya Etingof <[email protected]>'
author = u'Ilya Etingof <[email protected]>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'logo.svg',
'description': '<p align=left><i><b>Brewing free software for the greater good</i></b></p>',
'show_powered_by': False,
'github_user': 'etingof',
'github_repo': 'pyasn1',
'fixed_sidebar': True,
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '.static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyasn1doc'
# html_context = {
# 'include_analytics': 'PYASN1DEV' in os.environ
# }
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyASN1.tex', u'PyASN1 Documentation',
u'Ilya Etingof \\textless{}[email protected]\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyasn1', u'PyASN1 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyASN1', u'PyASN1 Documentation',
author, 'PyASN1', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3.4/', None)}
# this merges constructor docstring with class docstring
autoclass_content = 'both'
# Sort members by type
autodoc_member_order = 'bysource'
# autodoc_member_order = 'groupwise'
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
|
[] | [] | [] | [] | [] | python | 0 | 0 | |
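The only environment access in this Sphinx configuration is the commented-out `html_context` block, which tests for the presence of `PYASN1DEV` rather than reading its value, so nothing shows up in the extraction columns. A minimal sketch of that presence-as-flag pattern, were the block enabled:

```python
import os

# Presence of the variable acts as a boolean feature flag; its value is ignored.
html_context = {
    'include_analytics': 'PYASN1DEV' in os.environ,
}
```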
Python/Flask_Blog/10-Password-Reset-Email/flaskblog/__init__.py
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
app = Flask(__name__)
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('EMAIL_USER')
app.config['MAIL_PASSWORD'] = os.environ.get('EMAIL_PASS')
mail = Mail(app)
from flaskblog import routes
|
[] | [] | ["EMAIL_PASS", "EMAIL_USER"] | [] | ["EMAIL_PASS", "EMAIL_USER"] | python | 2 | 0 | |
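Only `EMAIL_USER` and `EMAIL_PASS` are read from the environment in this file; `SECRET_KEY` and the database URI are hard-coded. A hedged sketch, not part of the original tutorial code, of how those settings could be read the same way (the `SECRET_KEY` and `DATABASE_URI` variable names and the fallback values are illustrative):

```python
import os
from flask import Flask

app = Flask(__name__)
# Illustrative only: prefer environment values, fall back to the tutorial's literals.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', '5791628bb0b13ce0c676dfde280ba245')
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URI', 'sqlite:///site.db')
```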
tests/rpc_test.go
|
// This is a test utility for Ethermint's Web3 JSON-RPC services.
//
// To run these tests, please first ensure you have emintd running
// and have started the RPC service with `emintcli rest-server`.
//
// Set ETHERMINT_NODE_HOST to point the tests at that node and
// ETHERMINT_INTEGRATION_TEST_MODE to "stable" to enable them.
package tests
import (
"bytes"
"encoding/json"
"fmt"
"math/big"
"net/http"
"os"
"testing"
"time"
"github.com/cosmos/ethermint/version"
ethcmn "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
const (
addrA = "0xc94770007dda54cF92009BFF0dE90c06F603a09f"
addrAStoreKey = 0
)
var (
ETHERMINT_INTEGRATION_TEST_MODE = os.Getenv("ETHERMINT_INTEGRATION_TEST_MODE")
ETHERMINT_NODE_HOST = os.Getenv("ETHERMINT_NODE_HOST")
zeroString = "0x0"
)
type Request struct {
Version string `json:"jsonrpc"`
Method string `json:"method"`
Params interface{} `json:"params"`
ID int `json:"id"`
}
type RPCError struct {
Code int `json:"code"`
Message string `json:"message"`
Data interface{} `json:"data,omitempty"`
}
type Response struct {
Error *RPCError `json:"error"`
ID int `json:"id"`
Result json.RawMessage `json:"result,omitempty"`
}
func TestMain(m *testing.M) {
if ETHERMINT_INTEGRATION_TEST_MODE != "stable" {
_, _ = fmt.Fprintln(os.Stdout, "Going to skip stable test")
return
}
if ETHERMINT_NODE_HOST == "" {
_, _ = fmt.Fprintln(os.Stdout, "Going to skip stable test, ETHERMINT_NODE_HOST is not defined")
return
}
// Start all tests
code := m.Run()
os.Exit(code)
}
func createRequest(method string, params interface{}) Request {
return Request{
Version: "2.0",
Method: method,
Params: params,
ID: 1,
}
}
func call(t *testing.T, method string, params interface{}) *Response {
req, err := json.Marshal(createRequest(method, params))
require.NoError(t, err)
var rpcRes *Response
time.Sleep(1 * time.Second)
/* #nosec */
res, err := http.Post(ETHERMINT_NODE_HOST, "application/json", bytes.NewBuffer(req))
require.NoError(t, err)
decoder := json.NewDecoder(res.Body)
rpcRes = new(Response)
err = decoder.Decode(&rpcRes)
require.NoError(t, err)
err = res.Body.Close()
require.NoError(t, err)
require.Nil(t, rpcRes.Error)
return rpcRes
}
func TestEth_protocolVersion(t *testing.T) {
expectedRes := hexutil.Uint(version.ProtocolVersion)
rpcRes := call(t, "eth_protocolVersion", []string{})
var res hexutil.Uint
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
t.Logf("Got protocol version: %s\n", res.String())
require.Equal(t, expectedRes, res, "expected: %s got: %s\n", expectedRes.String(), rpcRes.Result)
}
func TestEth_blockNumber(t *testing.T) {
rpcRes := call(t, "eth_blockNumber", []string{})
var res hexutil.Uint64
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
t.Logf("Got block number: %s\n", res.String())
}
func TestEth_GetBalance(t *testing.T) {
rpcRes := call(t, "eth_getBalance", []string{addrA, zeroString})
var res hexutil.Big
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
t.Logf("Got balance %s for %s\n", res.String(), addrA)
// 0 if x == y; where x is res, y is 0
if res.ToInt().Cmp(big.NewInt(0)) != 0 {
t.Errorf("expected balance: %d, got: %s", 0, res.String())
}
}
func TestEth_GetStorageAt(t *testing.T) {
expectedRes := hexutil.Bytes{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
rpcRes := call(t, "eth_getStorageAt", []string{addrA, string(addrAStoreKey), zeroString})
var storage hexutil.Bytes
err := storage.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
t.Logf("Got value [%X] for %s with key %X\n", storage, addrA, addrAStoreKey)
require.True(t, bytes.Equal(storage, expectedRes), "expected: %d (%d bytes) got: %d (%d bytes)", expectedRes, len(expectedRes), storage, len(storage))
}
func TestEth_GetCode(t *testing.T) {
expectedRes := hexutil.Bytes{}
rpcRes := call(t, "eth_getCode", []string{addrA, zeroString})
var code hexutil.Bytes
err := code.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
t.Logf("Got code [%X] for %s\n", code, addrA)
require.True(t, bytes.Equal(expectedRes, code), "expected: %X got: %X", expectedRes, code)
}
func getAddress(t *testing.T) []byte {
rpcRes := call(t, "eth_accounts", []string{})
var res []hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &res)
require.NoError(t, err)
return res[0]
}
func TestEth_SendTransaction(t *testing.T) {
from := getAddress(t)
param := make([]map[string]string, 1)
param[0] = make(map[string]string)
param[0]["from"] = "0x" + fmt.Sprintf("%x", from)
param[0]["data"] = "0x6080604052348015600f57600080fd5b5060117f775a94827b8fd9b519d36cd827093c664f93347070a554f65e4a6f56cd73889860405160405180910390a2603580604b6000396000f3fe6080604052600080fdfea165627a7a723058206cab665f0f557620554bb45adf266708d2bd349b8a4314bdff205ee8440e3c240029"
rpcRes := call(t, "eth_sendTransaction", param)
var hash hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &hash)
require.NoError(t, err)
}
func TestEth_NewFilter(t *testing.T) {
param := make([]map[string][]string, 1)
param[0] = make(map[string][]string)
param[0]["topics"] = []string{"0x0000000000000000000000000000000000000000000000000000000012341234"}
rpcRes := call(t, "eth_newFilter", param)
var ID hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &ID)
require.NoError(t, err)
}
func TestEth_NewBlockFilter(t *testing.T) {
rpcRes := call(t, "eth_newBlockFilter", []string{})
var ID hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &ID)
require.NoError(t, err)
}
func TestEth_GetFilterChanges_NoLogs(t *testing.T) {
param := make([]map[string][]string, 1)
param[0] = make(map[string][]string)
param[0]["topics"] = []string{}
rpcRes := call(t, "eth_newFilter", param)
var ID hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &ID)
require.NoError(t, err)
changesRes := call(t, "eth_getFilterChanges", []string{ID.String()})
var logs []*ethtypes.Log
err = json.Unmarshal(changesRes.Result, &logs)
require.NoError(t, err)
}
func TestEth_GetFilterChanges_WrongID(t *testing.T) {
req, err := json.Marshal(createRequest("eth_getFilterChanges", []string{"0x1122334400000077"}))
require.NoError(t, err)
var rpcRes *Response
time.Sleep(1 * time.Second)
/* #nosec */
res, err := http.Post(ETHERMINT_NODE_HOST, "application/json", bytes.NewBuffer(req))
require.NoError(t, err)
decoder := json.NewDecoder(res.Body)
rpcRes = new(Response)
err = decoder.Decode(&rpcRes)
require.NoError(t, err)
err = res.Body.Close()
require.NoError(t, err)
require.NotNil(t, "invalid filter ID", rpcRes.Error.Message)
}
// sendTestTransaction sends a dummy transaction
func sendTestTransaction(t *testing.T) hexutil.Bytes {
from := getAddress(t)
param := make([]map[string]string, 1)
param[0] = make(map[string]string)
param[0]["from"] = "0x" + fmt.Sprintf("%x", from)
param[0]["to"] = "0x1122334455667788990011223344556677889900"
rpcRes := call(t, "eth_sendTransaction", param)
var hash hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &hash)
require.NoError(t, err)
return hash
}
func TestEth_GetTransactionReceipt(t *testing.T) {
hash := sendTestTransaction(t)
time.Sleep(time.Second * 5)
param := []string{hash.String()}
rpcRes := call(t, "eth_getTransactionReceipt", param)
receipt := make(map[string]interface{})
err := json.Unmarshal(rpcRes.Result, &receipt)
require.NoError(t, err)
require.Equal(t, "0x1", receipt["status"].(string))
}
// deployTestContract deploys a contract that emits an event in the constructor
func deployTestContract(t *testing.T) (hexutil.Bytes, map[string]interface{}) {
from := getAddress(t)
param := make([]map[string]string, 1)
param[0] = make(map[string]string)
param[0]["from"] = "0x" + fmt.Sprintf("%x", from)
param[0]["data"] = "0x6080604052348015600f57600080fd5b5060117f775a94827b8fd9b519d36cd827093c664f93347070a554f65e4a6f56cd73889860405160405180910390a2603580604b6000396000f3fe6080604052600080fdfea165627a7a723058206cab665f0f557620554bb45adf266708d2bd349b8a4314bdff205ee8440e3c240029"
param[0]["gas"] = "0x200000"
rpcRes := call(t, "eth_sendTransaction", param)
var hash hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &hash)
require.NoError(t, err)
receipt := waitForReceipt(t, hash)
require.NotNil(t, receipt, "transaction failed")
require.Equal(t, "0x1", receipt["status"].(string))
return hash, receipt
}
func TestEth_GetTransactionReceipt_ContractDeployment(t *testing.T) {
hash, _ := deployTestContract(t)
time.Sleep(time.Second * 5)
param := []string{hash.String()}
rpcRes := call(t, "eth_getTransactionReceipt", param)
receipt := make(map[string]interface{})
err := json.Unmarshal(rpcRes.Result, &receipt)
require.NoError(t, err)
require.Equal(t, "0x1", receipt["status"].(string))
require.NotEqual(t, ethcmn.Address{}.String(), receipt["contractAddress"].(string))
require.NotNil(t, receipt["logs"])
}
func getTransactionReceipt(t *testing.T, hash hexutil.Bytes) map[string]interface{} {
param := []string{hash.String()}
rpcRes := call(t, "eth_getTransactionReceipt", param)
receipt := make(map[string]interface{})
err := json.Unmarshal(rpcRes.Result, &receipt)
require.NoError(t, err)
return receipt
}
func waitForReceipt(t *testing.T, hash hexutil.Bytes) map[string]interface{} {
for i := 0; i < 12; i++ {
receipt := getTransactionReceipt(t, hash)
if receipt != nil {
return receipt
}
time.Sleep(time.Second)
}
return nil
}
func TestEth_GetTransactionLogs(t *testing.T) {
hash, _ := deployTestContract(t)
param := []string{hash.String()}
rpcRes := call(t, "eth_getTransactionLogs", param)
logs := new([]*ethtypes.Log)
err := json.Unmarshal(rpcRes.Result, logs)
require.NoError(t, err)
require.Equal(t, 1, len(*logs))
}
func TestEth_GetFilterChanges_NoTopics(t *testing.T) {
rpcRes := call(t, "eth_blockNumber", []string{})
var res hexutil.Uint64
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
param := make([]map[string]interface{}, 1)
param[0] = make(map[string]interface{})
param[0]["topics"] = []string{}
param[0]["fromBlock"] = res.String()
param[0]["toBlock"] = zeroString // latest
// instantiate new filter
rpcRes = call(t, "eth_newFilter", param)
var ID hexutil.Bytes
err = json.Unmarshal(rpcRes.Result, &ID)
require.NoError(t, err)
// deploy contract, emitting some event
deployTestContract(t)
// get filter changes
changesRes := call(t, "eth_getFilterChanges", []string{ID.String()})
var logs []*ethtypes.Log
err = json.Unmarshal(changesRes.Result, &logs)
require.NoError(t, err)
require.Equal(t, 1, len(logs))
}
func TestEth_GetFilterChanges_Addresses(t *testing.T) {
t.Skip()
// TODO: need transaction receipts to determine contract deployment address
}
func TestEth_GetFilterChanges_BlockHash(t *testing.T) {
t.Skip()
// TODO: need transaction receipts to determine tx block
}
// hash of Hello event
var helloTopic = "0x775a94827b8fd9b519d36cd827093c664f93347070a554f65e4a6f56cd738898"
// world parameter in Hello event
var worldTopic = "0x0000000000000000000000000000000000000000000000000000000000000011"
func deployTestContractWithFunction(t *testing.T) hexutil.Bytes {
// pragma solidity ^0.5.1;
// contract Test {
// event Hello(uint256 indexed world);
// event Test(uint256 indexed a, uint256 indexed b);
// constructor() public {
// emit Hello(17);
// }
// function test(uint256 a, uint256 b) public {
// emit Test(a, b);
// }
// }
bytecode := "0x608060405234801561001057600080fd5b5060117f775a94827b8fd9b519d36cd827093c664f93347070a554f65e4a6f56cd73889860405160405180910390a260c98061004d6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063eb8ac92114602d575b600080fd5b606060048036036040811015604157600080fd5b8101908080359060200190929190803590602001909291905050506062565b005b80827f91916a5e2c96453ddf6b585497262675140eb9f7a774095fb003d93e6dc6921660405160405180910390a3505056fea265627a7a72315820ef746422e676b3ed22147cd771a6f689e7c33ef17bf5cd91921793b5dd01e3e064736f6c63430005110032"
from := getAddress(t)
param := make([]map[string]string, 1)
param[0] = make(map[string]string)
param[0]["from"] = "0x" + fmt.Sprintf("%x", from)
param[0]["data"] = bytecode
param[0]["gas"] = "0x200000"
rpcRes := call(t, "eth_sendTransaction", param)
var hash hexutil.Bytes
err := json.Unmarshal(rpcRes.Result, &hash)
require.NoError(t, err)
receipt := waitForReceipt(t, hash)
require.NotNil(t, receipt, "transaction failed")
require.Equal(t, "0x1", receipt["status"].(string))
return hash
}
// Tests topics case where there are topics in first two positions
func TestEth_GetFilterChanges_Topics_AB(t *testing.T) {
time.Sleep(time.Second)
rpcRes := call(t, "eth_blockNumber", []string{})
var res hexutil.Uint64
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
param := make([]map[string]interface{}, 1)
param[0] = make(map[string]interface{})
param[0]["topics"] = []string{helloTopic, worldTopic}
param[0]["fromBlock"] = res.String()
param[0]["toBlock"] = zeroString // latest
// instantiate new filter
rpcRes = call(t, "eth_newFilter", param)
var ID hexutil.Bytes
err = json.Unmarshal(rpcRes.Result, &ID)
require.NoError(t, err)
deployTestContractWithFunction(t)
// get filter changes
changesRes := call(t, "eth_getFilterChanges", []string{ID.String()})
var logs []*ethtypes.Log
err = json.Unmarshal(changesRes.Result, &logs)
require.NoError(t, err)
require.Equal(t, 1, len(logs))
}
func TestEth_GetFilterChanges_Topics_XB(t *testing.T) {
rpcRes := call(t, "eth_blockNumber", []string{})
var res hexutil.Uint64
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
param := make([]map[string]interface{}, 1)
param[0] = make(map[string]interface{})
param[0]["topics"] = []interface{}{nil, worldTopic}
param[0]["fromBlock"] = res.String()
param[0]["toBlock"] = "0x0" // latest
// instantiate new filter
rpcRes = call(t, "eth_newFilter", param)
var ID hexutil.Bytes
err = json.Unmarshal(rpcRes.Result, &ID)
require.NoError(t, err)
deployTestContractWithFunction(t)
// get filter changes
changesRes := call(t, "eth_getFilterChanges", []string{ID.String()})
var logs []*ethtypes.Log
err = json.Unmarshal(changesRes.Result, &logs)
require.NoError(t, err)
require.Equal(t, 1, len(logs))
}
func TestEth_GetFilterChanges_Topics_XXC(t *testing.T) {
t.Skip()
// TODO: call test function, need tx receipts to determine contract address
}
func TestEth_GetLogs_NoLogs(t *testing.T) {
param := make([]map[string][]string, 1)
param[0] = make(map[string][]string)
param[0]["topics"] = []string{}
call(t, "eth_getLogs", param)
}
func TestEth_GetLogs_Topics_AB(t *testing.T) {
rpcRes := call(t, "eth_blockNumber", []string{})
var res hexutil.Uint64
err := res.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
param := make([]map[string]interface{}, 1)
param[0] = make(map[string]interface{})
param[0]["topics"] = []string{helloTopic, worldTopic}
param[0]["fromBlock"] = res.String()
param[0]["toBlock"] = zeroString // latest
hash := deployTestContractWithFunction(t)
waitForReceipt(t, hash)
rpcRes = call(t, "eth_getLogs", param)
var logs []*ethtypes.Log
err = json.Unmarshal(rpcRes.Result, &logs)
require.NoError(t, err)
require.Equal(t, 1, len(logs))
}
func TestEth_PendingTransactionFilter(t *testing.T) {
rpcRes := call(t, "eth_newPendingTransactionFilter", []string{})
var code hexutil.Bytes
err := code.UnmarshalJSON(rpcRes.Result)
require.NoError(t, err)
require.NotNil(t, code)
for i := 0; i < 5; i++ {
deployTestContractWithFunction(t)
}
time.Sleep(10 * time.Second)
// get filter changes
changesRes := call(t, "eth_getFilterChanges", []string{code.String()})
require.NotNil(t, changesRes)
var txs []*hexutil.Bytes
err = json.Unmarshal(changesRes.Result, &txs)
require.NoError(t, err, string(changesRes.Result))
require.True(t, len(txs) >= 2, "could not get any txs", "changesRes.Result", string(changesRes.Result))
}
|
["\"ETHERMINT_INTEGRATION_TEST_MODE\"", "\"ETHERMINT_NODE_HOST\""] | [] | ["ETHERMINT_NODE_HOST", "ETHERMINT_INTEGRATION_TEST_MODE"] | [] | ["ETHERMINT_NODE_HOST", "ETHERMINT_INTEGRATION_TEST_MODE"] | go | 2 | 0 | |
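Every test above funnels through the `call` helper, which wraps the method in the same JSON-RPC 2.0 envelope (`jsonrpc`, `method`, `params`, `id`) and POSTs it to `ETHERMINT_NODE_HOST`. A minimal Python sketch of one such request, assuming a node is reachable; the `http://localhost:8545` fallback is an assumption, not something the tests define:

```python
import json
import os
import urllib.request

node = os.getenv("ETHERMINT_NODE_HOST", "http://localhost:8545")  # fallback is illustrative

# Same envelope the Go tests build in createRequest().
payload = {"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}
req = urllib.request.Request(
    node,
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as res:
    print(json.load(res))  # e.g. {"id": 1, "result": "0x...", ...}
```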
sgevents/subprocess.py
|
from __future__ import absolute_import
import cPickle as pickle
import os
import subprocess
import sys
from .utils import get_command_prefix, get_func_name, get_func
from . import logs
def call_in_fork(func, args=None, kwargs=None):
args = args or ()
    kwargs = kwargs or {}
pid = os.fork()
if pid:
return pid
func = get_func(func)
func(*args, **kwargs)
def call_in_subprocess(func, args=None, kwargs=None, envvars=None, ):
cmd = get_command_prefix(envvars) if envvars else []
cmd.extend((sys.executable, '-m', 'sgevents.subprocess'))
environ = os.environ.copy()
environ.update(envvars or {})
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, env=environ)
proc.stdin.write(pickle.dumps({
'func': get_func_name(func),
'args': args,
'kwargs': kwargs,
'log_setup': logs.get_log_setup(),
'log_meta': logs.get_log_meta(),
}))
proc.stdin.close()
return proc
def _main():
raw_package = sys.stdin.read()
package = pickle.loads(raw_package)
func = get_func(package['func'])
args = package.get('args') or ()
kwargs = package.get('kwargs') or {}
log_setup = package.get('log_setup')
log_meta = package.get('log_meta')
# Restore logging state.
if log_setup:
logs.setup_logs(*log_setup)
if log_meta:
logs.update_log_meta(**log_meta)
func(*args, **kwargs)
def test(*args, **kwargs):
print __name__, args, kwargs
if __name__ == '__main__':
exit(_main() or 0)
|
[] | [] | [] | [] | [] | python | 0 | 0 | |
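`call_in_subprocess` above pickles the call description (function name, args, kwargs, logging state), pipes it to a `python -m sgevents.subprocess` child over stdin, and overlays `envvars` onto the child's environment. A stripped-down Python 3 sketch of that stdin handshake; the `mypackage.worker` module name is a placeholder, not the sgevents API:

```python
import os
import pickle
import subprocess
import sys

def call_in_subprocess(func_name, args=(), kwargs=None, envvars=None):
    env = os.environ.copy()
    env.update(envvars or {})
    # The child is expected to unpickle this payload from stdin and dispatch the call.
    payload = pickle.dumps({"func": func_name, "args": args, "kwargs": kwargs or {}})
    proc = subprocess.Popen(
        [sys.executable, "-m", "mypackage.worker"],  # placeholder worker module
        stdin=subprocess.PIPE,
        env=env,
    )
    proc.stdin.write(payload)
    proc.stdin.close()
    return proc
```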
auth/addin/auth_test.go
|
package addin
import (
"os"
"testing"
h "github.com/koltyakov/gosip/test/helpers"
u "github.com/koltyakov/gosip/test/utils"
)
var (
cnfgPath = "./config/private.spo-addin.json"
ci bool
)
func init() {
ci = os.Getenv("SPAUTH_CI") == "true"
if ci { // In CI mode
cnfgPath = "./config/private.spo-addin.ci.json"
auth := &AuthCnfg{
SiteURL: os.Getenv("SPAUTH_SITEURL"),
ClientID: os.Getenv("SPAUTH_CLIENTID"),
ClientSecret: os.Getenv("SPAUTH_CLIENTSECRET"),
}
_ = auth.WriteConfig(u.ResolveCnfgPath(cnfgPath))
}
}
func TestGettingAuthToken(t *testing.T) {
if !h.ConfigExists(cnfgPath) {
t.Skip("No auth config provided")
}
err := h.CheckAuth(
&AuthCnfg{},
cnfgPath,
[]string{"SiteURL", "ClientID", "ClientSecret"},
)
if err != nil {
t.Error(err)
}
}
func TestGettingDigest(t *testing.T) {
if !h.ConfigExists(cnfgPath) {
t.Skip("No auth config provided")
}
err := h.CheckDigest(&AuthCnfg{}, cnfgPath)
if err != nil {
t.Error(err)
}
}
func TestCheckRequest(t *testing.T) {
if !h.ConfigExists(cnfgPath) {
t.Skip("No auth config provided")
}
err := h.CheckRequest(&AuthCnfg{}, cnfgPath)
if err != nil {
t.Error(err)
}
}
func TestAuthEdgeCases(t *testing.T) {
t.Run("ReadConfig/MissedConfig", func(t *testing.T) {
cnfg := &AuthCnfg{}
if err := cnfg.ReadConfig("wrong_path.json"); err == nil {
t.Error("wrong_path config should not pass")
}
})
t.Run("ReadConfig/MissedConfig", func(t *testing.T) {
cnfg := &AuthCnfg{}
if err := cnfg.ReadConfig(u.ResolveCnfgPath("./test/config/malformed.json")); err == nil {
t.Error("malformed config should not pass")
}
})
t.Run("WriteConfig", func(t *testing.T) {
folderPath := u.ResolveCnfgPath("./test/tmp")
filePath := u.ResolveCnfgPath("./test/tmp/addin.json")
cnfg := &AuthCnfg{SiteURL: "test"}
_ = os.MkdirAll(folderPath, os.ModePerm)
if err := cnfg.WriteConfig(filePath); err != nil {
t.Error(err)
}
_ = os.RemoveAll(filePath)
})
t.Run("SetMasterkey", func(t *testing.T) {
cnfg := &AuthCnfg{}
cnfg.SetMasterkey("key")
if cnfg.masterKey != "key" {
t.Error("unable to set master key")
}
})
}
|
["\"SPAUTH_CI\"", "\"SPAUTH_SITEURL\"", "\"SPAUTH_CLIENTID\"", "\"SPAUTH_CLIENTSECRET\""] | [] | ["SPAUTH_CLIENTSECRET", "SPAUTH_CI", "SPAUTH_CLIENTID", "SPAUTH_SITEURL"] | [] | ["SPAUTH_CLIENTSECRET", "SPAUTH_CI", "SPAUTH_CLIENTID", "SPAUTH_SITEURL"] | go | 4 | 0 | |
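In CI mode the `init` function above materialises the three `SPAUTH_*` secrets into a JSON auth config before the tests run. A rough Python equivalent of that step; the JSON key names are assumptions, since the exact serialisation is decided by `AuthCnfg.WriteConfig`:

```python
import json
import os

if os.getenv("SPAUTH_CI") == "true":
    cnfg = {
        "siteUrl": os.getenv("SPAUTH_SITEURL"),          # key names assumed
        "clientId": os.getenv("SPAUTH_CLIENTID"),
        "clientSecret": os.getenv("SPAUTH_CLIENTSECRET"),
    }
    with open("./config/private.spo-addin.ci.json", "w") as fh:
        json.dump(cnfg, fh, indent=2)
```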
pkg/get/download.go
|
// Copyright © 2020 Vertigo Tecnologia. All rights reserved.
// Licensed under the Apache License, Version 2.0. See LICENSE file in the project root for full license information.
package get
import (
"fmt"
"io"
"io/ioutil"
"net/http"
u "net/url"
"os"
"os/user"
"strconv"
"github.com/vertigobr/safira/pkg/config"
"github.com/vertigobr/safira/pkg/git"
"gopkg.in/gookit/color.v1"
)
func downloadBinary(url, name string, binary bool) error {
parsedURL, _ := u.Parse(url)
res, err := http.DefaultClient.Get(parsedURL.String())
if err != nil {
return fmt.Errorf("%s Error when obtaining request body: %s", color.Red.Text("[!]"), url)
}
defer res.Body.Close()
dest, err := config.CreateInBinDir()
if err != nil {
return err
}
if binary {
// Criar arquivo
out, err := os.Create(fmt.Sprintf("%s/%s", dest, name))
if err != nil {
return fmt.Errorf("%s Error when creating %s file", color.Red.Text("[!]"), name)
}
defer out.Close()
// Escreve o corpo da resposta no arquivo
if _, err := io.Copy(out, res.Body); err != nil {
return fmt.Errorf("%s Error writing request body to file %s", color.Red.Text("[!]"), name)
}
if err := os.Chmod(fmt.Sprintf("%s/%s", dest, name), 0700); err != nil {
return fmt.Errorf("%s Error when making the X file an executable - %s", color.Red.Text("[!]"), name)
}
} else {
r := ioutil.NopCloser(res.Body)
if err := Untar(r, dest); err != nil {
return err
}
}
sudoUser := os.Getenv("SUDO_USER")
if len(sudoUser) == 0 {
return nil
}
	u, err := user.Lookup(sudoUser)
	if err != nil {
		return fmt.Errorf("%s Error looking up user %s", color.Red.Text("[!]"), sudoUser)
	}
	Uid, _ := strconv.Atoi(u.Uid)
	Gid, _ := strconv.Atoi(u.Gid)
if err := os.Chown(fmt.Sprintf("%s/%s", dest, name), Uid, Gid); err != nil {
return fmt.Errorf("%s Error changing the owner of the root folder for the user", color.Red.Text("[!]"))
}
return nil
}
func DownloadTemplate(faasTemplateRepo string, update, verboseFlag bool) error {
if err := git.PullTemplate(faasTemplateRepo, update, verboseFlag); err != nil {
return err
}
return nil
}
|
["\"SUDO_USER\""] | [] | ["SUDO_USER"] | [] | ["SUDO_USER"] | go | 1 | 0 | |
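`downloadBinary` finishes by re-owning the installed file to the invoking user when the CLI runs under `sudo`, using `SUDO_USER` to find the real account. A compact Python sketch of the same idea; the path in the commented call is a placeholder:

```python
import os
import pwd

def chown_to_sudo_user(path: str) -> None:
    # Under sudo, SUDO_USER names the account that actually invoked the command.
    sudo_user = os.getenv("SUDO_USER")
    if not sudo_user:
        return  # not running under sudo; nothing to do
    entry = pwd.getpwnam(sudo_user)
    os.chown(path, entry.pw_uid, entry.pw_gid)

# chown_to_sudo_user("/usr/local/bin/<binary>")  # placeholder path
```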
xutil/database/base.py
|
# Database Lib
"""
Oracle
PostGresSQL
SQLite
SQLServer
Hive
Spark
"""
import os, datetime, pandas, time, re
from collections import namedtuple, OrderedDict
import jmespath
import sqlalchemy
from multiprocessing import Queue, Process
from xutil.helpers import (
log,
elog,
slog,
get_exception_message,
struct,
now,
get_databases,
get_dir_path,
get_profile,
get_variables,
file_exists,
str_rmv_indent,
ptable,
make_rec,
get_error_str,
)
from xutil.diskio import read_yaml, write_csvs
conns = {}
_fwklike = lambda k, v: "lower({}) like lower('{}')".format(k, v)
_fwkeq = lambda k, v: "{} = '{}'".format(k, v)
_fw = lambda sep, _fwkop, **kws: sep.join([_fwkop(k, v) for k, v in kws.items()]) # Format WHERE
fwa = lambda _fwkop=_fwkeq, **kws: _fw(' and ', _fwkop, **kws) # Format WHERE AND
fwo = lambda _fwkop=_fwkeq, **kws: _fw(' or ', _fwkop, **kws) # Format WHERE OR
rows_to_dicts = lambda rows: [row._asdict() for row in rows]
class DBConn(object):
"""Base class for database connections"""
_fix_f_name = lambda self, f: f
_to_text = lambda self, t: t
def __init__(self, conn_dict, profile=None, echo=False):
"Inititate connection"
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
# Base Template
template_base_path = '{}/database/templates/base.yaml'.format(
get_dir_path())
self.template_dict = read_yaml(template_base_path)
# Specific Type Template
template_path = '{}/database/templates/{}.yaml'.format(
get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
# Level 1
if isinstance(temp_dict[key1], dict):
if key1 not in self.template_dict:
self.template_dict[key1] = temp_dict[key1]
# Level 2
for key2 in temp_dict[key1]:
# Always Overwrite
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
# Level 1 Non-Dict Overwrite
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log("Connected to {} as {}".format(self._cred.name, self._cred.user))
def connect(self):
"""Connect to Database"""
self.engine = self.get_engine()
self.connection = self.engine.connect()
def close(self):
"""Close database connection"""
    self.connection.close()
def reconnect(self, min_tresh=0):
"""Re-Connect to Database if minute threshold reached"""
if (now() - self.last_connect).total_seconds() > min_tresh * 60:
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
def set_variables(self):
"""Set custom variables"""
raise Exception("Method 'set_variables' is not implemented!")
def get_dialect(self, echo=False):
"""SQLAlchemy dialect"""
raise Exception("Method 'get_dialect' is not implemented!")
def get_engine(self, echo=False):
import sqlalchemy
if not self.engine:
self.create_engine(echo=self.echo)
self.engine_inspect = sqlalchemy.inspect(self.engine)
return self.engine
def check_pk(self, table, fields):
"Check Primary key to ensure there are not duplicates"
if 'where' in fields.lower():
fields, where_clause = fields.lower().split('where')
where_clause = 'where ' + where_clause
else:
where_clause = ''
sql = '''
select
'{table}' as table,
case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result
from {table}
{where_clause}
'''.format(
table=table,
fields=fields,
where_clause=where_clause,
)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if data[0].pk_result == 'FAIL':
      raise (Exception('PK Test failed for table "{}" with fields "{}"'.format(
table, fields)))
def _do_execute(self, sql):
try:
self._cursor_description = None
      self._fields = None
self.result = self.connection.execute(sql)
self._cursor_description = self.result._cursor_description()
self._fields = self._get_cursor_fields()
except Exception as E:
if 'not open' in get_error_str(E):
pass # error when Oracle doesn't have a cursor open
else:
log(Exception('Error for SQL:\n' + sql))
raise E
def execute_multi(self,
sql,
dtype='namedtuple',
limit=None,
echo=True,
query_name='Record',
log=log):
"""
    Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))
"""
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {
'drop ': 'Dropping {}.',
'truncate ': 'Truncating {}.',
'select ': 'Selecting {}.',
'create ': 'Creating {}.',
'insert ': 'Inserting {}.',
'alter ': 'Altering {}.',
'update ': 'Updating {}.',
'delete ': 'Deleting {}.',
'exec ': 'Calling Procedure {}.',
'grant ': 'Granting {}.',
}
sqls = sql.split(';')
for sql in sqls:
if not sql.strip(): continue
sql_ = sql.strip().lower()
for word, message in message_mapping.items():
if sql_.startswith(word):
if echo:
log(
message.format(' '.join(
sql_.splitlines()[0].split()[1:3]).upper()))
break
      # Call procedure with callproc (mirrors the handling in execute() below)
      if sql_.startswith('exec '):
        procedure = sql_[5:].split('(')[0]
        args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
        args = [a.strip() for a in args]
        connection = self.engine.raw_connection()
        try:
          cursor = connection.cursor()
          cursor.callproc(procedure, args)
          cursor.close()
          connection.commit()
        finally:
          connection.close()
        continue
try:
self._fields = []
rows = self.query(
sql,
rec_name=query_name,
dtype=dtype,
limit=limit,
echo=echo,
log=log)
fields = self._fields
if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
sql_lines = sql_.splitlines()
regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [
l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
][0]
fields = line.split(':')[-1]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if sql_.startswith(
'drop ') and self.error_msg['table_not_exist'] in message:
log("WARNING: Table already dropped.")
else:
raise E
if not fields: fields = []
yield fields, rows
def execute(self,
sql,
dtype='tuple',
limit=None,
echo=True,
query_name='Record',
log=log):
"""Execute SQL, return last result"""
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {
'drop ': 'Dropping {}.',
'truncate ': 'Truncating {}.',
'select ': 'Selecting {}.',
'create ': 'Creating {}.',
'insert ': 'Inserting {}.',
'alter ': 'Altering {}.',
'update ': 'Updating {}.',
'delete ': 'Deleting {}.',
'exec ': 'Calling Procedure {}.',
'grant ': 'Granting {}.',
}
sql_ = sql.strip().lower()
for word, message in message_mapping.items():
if sql_.startswith(word):
if echo:
log(
message.format(' '.join(
sql_.splitlines()[0].split()[1:3]).upper()))
break
# Call procedure with callproc
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
return fields, rows
finally:
connection.close()
try:
self._fields = []
rows = self.query(
sql,
rec_name=query_name,
dtype=dtype,
limit=limit,
echo=echo,
log=log)
fields = self._fields
if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
sql_lines = sql_.splitlines()
regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [
l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
][0]
fields = line.split(':')[-1]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if sql_.startswith(
'drop ') and self.error_msg['table_not_exist'] in message:
log("WARNING: Table already dropped.")
else:
raise E
if not fields: fields = []
return fields, rows
def insert(self, table, data, echo=False):
"""Insert records of namedtuple or dicts"""
raise Exception('insert not implemented')
def drop_table(self, table, log=log):
"Drop table"
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if self._template('error_filter.table_not_exist') in message:
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
def create_table(self, table, field_types, drop=False, log=log):
"Create table"
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
ftype, max_len, dec_len = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace(
'()', suff)
field_types_str = ', \n'.join([
self._fix_f_name(field) + ' ' + new_ftypes[field] for field in new_ftypes
])
sql = self._template('core.create_table').format(
table=table,
col_types=field_types_str,
)
# log('Creating table: \n' + sql))
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
"Get fields of active Select cursor"
fields = OrderedDict()
cursor_desc = cursor_desc if cursor_desc else self._cursor_description
if cursor_desc == None:
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
# assign floa/double as needed
if 'cx_Oracle.NUMBER' in str(f[1]):
if f[4] and f[4] > 11: f_type = 'long'
if f[5] and f[5] > 0: f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
def stream(self,
sql,
rec_name='Record',
dtype='namedtuple',
yield_chuncks=False,
chunk_size=None,
limit=None,
echo=True):
"Stream Select from SQL, yield records as they come in"
self.reconnect(min_tresh=10)
if echo: log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = limit if limit else self.fetch_size
fetch_size = chunk_size if chunk_size else fetch_size
try:
self._do_execute(sql)
except Exception as e:
raise e
if dtype == 'tuple':
make_rec = lambda row: row
make_batch = lambda rows: rows
elif dtype == 'dataframe':
yield_chuncks=True
make_batch = lambda rows: pandas.DataFrame(rows, columns=self._fields)
else:
Record = namedtuple(
rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = lambda row: Record(*row)
make_batch = lambda rows: [make_rec(r) for r in rows]
self._stream_counter = 0
while True:
if not self._fields:
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
yield batch
else:
for row in rows:
self._stream_counter += 1
yield make_rec(row)
else:
break
if limit:
break
# log('Stream finished at {} records.'.format(self._stream_counter))
def query(self,
sql,
rec_name='Record',
dtype='namedtuple',
limit=None,
echo=True,
retrying=False,
log=log):
"Select from SQL, return list of namedtuples"
# if echo: log("Running SQL for '{}'.".format(rec_name))
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if not self.result.closed:
self.result.close()
fields = self._fields
if not fields: return []
if dtype == 'namedtuple':
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
if limit:
data = [Record(*row) for row in _data]
else:
data = [Record(*row) for row in _data]
elif dtype == 'tuple':
if limit:
data = [tuple(row) for row in _data]
else:
data = [tuple(row) for row in _data]
elif dtype == 'dataframe':
if limit:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
      raise (Exception('{} is not recognized.'.format(dtype)))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round(len(data) / secs, 1)
if echo:
log(" >>> Got {} rows in {} secs [{} r/s].".format(
len(data), secs, rate))
return data
def _split_schema_table(self, table_name):
schema, table = table_name.split('.') if '.' in table_name else (
self.username, table_name)
return schema, table
def _concat_fields(self, fields, as_text=False):
return ' || '.join(fields)
def _template(self, template_key_str):
val = jmespath.search(template_key_str, self.template_dict)
if isinstance(val, str):
val = str_rmv_indent(val)
return val
def get_schemas(self, echo=True):
"Get list of schemas."
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
# http://docs.sqlalchemy.org/en/rel_0_9/core/reflection.html#sqlalchemy.engine.reflection.Inspector.get_schemas
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows
def get_objects(self, schema, object_type='all', echo=True):
"Get metadata for objects. object_type in 'all', 'table', 'view'"
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(
schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if object_type == 'all':
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif object_type == 'table':
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif object_type == 'view':
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows
def get_tables(self, schema, echo=True):
"Get metadata for tables."
schemas = schema if isinstance(schema, list) else [schema]
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
# Getting pickle.PicklingError: Can't pickle <class 'xutil.database.base.Table'>
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows
def get_views(self, schema, echo=True):
"Get metadata for views."
schemas = schema if isinstance(schema, list) else [schema]
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
# pickle.PicklingError: Can't pickle <class 'xutil.database.base.View'>
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows
def get_columns(self,
table_name,
object_type=None,
echo=False,
include_schema_table=True,
native_type=True):
"Get column metadata for table"
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = table_name if isinstance(table_name, list) else [table_name]
for table_name in table_names:
schema, table = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if not native_type:
r_dict['type']= r_dict['type'].lower()
r_dict['type'] = r_dict['type'].split('(')[0] if '(' in r_dict[
'type'] else r_dict['type']
native_type_map = self._template('native_type_map')
if not r_dict['type'] in native_type_map:
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if k not in headers.split():
del r_dict[k]
if '(' in r_dict['type']:
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, i + 1) for i, r_dict in enumerate(rows)]
self._fields = Rec._fields
return all_rows
def get_primary_keys(self, table_name, echo=False):
"Get PK metadata for table"
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
schema, table = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [
get_rec(col, r_dict['name'], i + 1)
for i, col in enumerate(r_dict['constrained_columns'])
]
return rows
def get_indexes(self, table_name, echo=False):
"Get indexes metadata for table"
Rec = namedtuple(
'Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
schema, table = self._split_schema_table(table_name)
def get_rec(r_dict):
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['index_name'] = r_dict['name']
r_dict['unique'] = str(r_dict['unique'])
del r_dict['name']
for i, col in enumerate(r_dict['column_names']):
r_dict['column_name'] = col
r_dict['column_order'] = i + 1
yield Rec(**r_dict)
sql_tmpl = self._template('metadata.indexes')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_indexes(table, schema=schema)
rows = [get_rec(r_dict) for r_dict in rows]
return rows
def get_ddl(self, table_name, object_type=None, echo=True):
"Get ddl for table"
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
schema, table = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(
sql_tmpl.format(
schema=schema,
table=table,
obj_type=object_type,
))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = [Rec(ddl)] if ddl else []
self._fields = Rec._fields
return rows
def get_all_columns(self):
"Get all columns for all tables / views"
sql_tmpl = self._template('metadata.all_columns')
if not sql_tmpl:
raise Exception('get_all_columns not implemented for {}'.format(
self.type))
rows = self.query(sql_tmpl)
return rows
def get_all_tables(self, filter, as_sql=False):
"Get all tables / views"
sql_tmpl = self._template('metadata.all_tables')
if not sql_tmpl:
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return sql if as_sql else self.query(sql, echo=False)
def analyze_fields(self,
analysis,
table_name,
fields=[],
as_sql=False,
union=True,
expr_func_map={},
**kwargs):
"""Base function for field level analysis
expr_func_map: contains mapping for expression to SQL function to all fields
"""
if '.' not in table_name:
raise Exception("table_name must have schema and name in it with a '.'")
if analysis not in self.template_dict['analysis']:
raise Exception("'{}' not found in template for '{}'.".format(
analysis, self.type))
schema, table = self._split_schema_table(table_name)
# get field type
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if not fields:
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = 'function.' + expr_func_map[expr]
expr_func_map[expr] = ',\n'.join([
self._template(tmpl_path).format(field=field)
for field in [r.column_name for r in field_rows]
])
sep = ' \nunion all\n' if union else ' \n ;\n'
sql = sep.join([
self._template('analysis.' + analysis).format(
schema=schema,
field=field,
table=table,
type=field_type[field.lower()] if field else '',
**expr_func_map,
**kwargs) for field in fields
])
return sql if as_sql else self.query(sql, analysis, echo=False)
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
"""Base function for table level analysis"""
if analysis not in self.template_dict['analysis']:
raise Exception("'{}' not found in template for '{}'.".format(
analysis, self.type))
if not tables and 'schema' in kwargs:
# get all tables
rows = self.get_schemas(kwargs['schema'])
crt_obj = lambda r: struct(dict(schema=r.schema, table=r.object_name))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = lambda schema, table: struct(dict(schema=schema, table=table))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([
self._template('analysis.' + analysis).format(
schema=obj.schema, table=obj.table, **kwargs) for obj in objs
])
return sql if as_sql else self.query(sql, analysis, echo=False)
def analyze_join_match(self,
t1,
t2,
t1_field,
t2_field,
t1_filter='1=1',
t2_filter='1=1',
as_sql=False,
as_text=True,
lowercase=True):
def get_kwargs(t1, t2, t1_field, t2_field, t1_filter, t2_filter):
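      # Builds the kwargs for the join-match template: concatenated key expressions for each side plus the equality conditions joining them.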
t1_field_arr = ['t1.' + f for f in t1_field.split(',')]
t2_field_arr = ['t2.' + f for f in t2_field.split(',')]
t1_field_concat = self._concat_fields(t1_field_arr, as_text=as_text)
t2_field_concat = self._concat_fields(t2_field_arr, as_text=as_text)
to_text = self._to_text
if lowercase:
conds = ' and '.join([
'lower({}) = lower({})'.format(to_text(f), to_text(t2_field_arr[i]))
for i, f in enumerate(t1_field_arr)
])
else:
conds = ' and '.join([
'{} = {}'.format(to_text(f), to_text(t2_field_arr[i]))
for i, f in enumerate(t1_field_arr)
])
t1_fields1 = t1_field
t2_fields1 = t2_field
t1_field = ', '.join(['t1.' + f for f in t1_field_arr])
t2_field = ', '.join(['t2.' + f for f in t2_field_arr])
return dict(
t1=t1,
t1_field=t1_field_concat,
t1_fields1=t1_fields1,
t1_filter=t1_filter,
t2=t2,
t2_field=t2_field_concat,
t2_fields1=t2_fields1,
t2_filter=t2_filter,
conds=conds,
)
kwargs = get_kwargs(
t1=t1,
t2=t2,
t1_field=t1_field,
t2_field=t2_field,
t1_filter=t1_filter,
t2_filter=t2_filter,
)
sql = self.analyze_fields(
'table_join_match', t1, [''], as_sql=True, **kwargs)
return sql if as_sql else self.query(sql, 'table_join_match', echo=False)
def get_conn(db,
dbs=None,
echo=True,
reconnect=False,
use_jdbc=False,
conn_expire_min=10,
spark_hive=False) -> DBConn:
global conns
dbs = dbs if dbs else get_databases()
profile = get_profile()
db_dict = struct(dbs[db])
if db_dict.type.lower() == 'hive' and spark_hive:
db_dict.type = 'spark'
use_jdbc = True if (use_jdbc or ('use_jdbc' in db_dict
and db_dict['use_jdbc'])) else use_jdbc
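  # Reuse a cached connection unless it has expired or a reconnect was requested.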
if db in conns and not reconnect:
if (now() - conns[db].last_connect).total_seconds() / 60 < conn_expire_min:
return conns[db]
if use_jdbc:
log('*USING JDBC for ' + db)
from .jdbc import JdbcConn
conn = JdbcConn(db_dict, profile=profile)
elif db_dict.type.lower() == 'oracle':
from .oracle import OracleConn
conn = OracleConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'spark':
from .spark import SparkConn
conn = SparkConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'hive':
from .hive import HiveConn, Beeline
if 'use_beeline' in db_dict and db_dict.use_beeline:
conn = Beeline(db_dict, echo=echo)
else:
conn = HiveConn(db_dict, echo=echo)
elif db_dict.type.lower() in ('postgresql', 'redshift'):
from .postgresql import PostgreSQLConn
conn = PostgreSQLConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'sqlserver':
from .sqlserver import SQLServerConn
conn = SQLServerConn(db_dict, echo=echo)
elif db_dict.type.lower() == 'sqlite':
from .sqlite import SQLiteConn
conn = SQLiteConn(db_dict, echo=echo)
else:
raise Exception(f'Type {db_dict.type} not handled!')
conns[db] = conn
return conn
class SqlX:
"""
SQL Express functions. Supports CRUD transactional operations.
Suppose there is a table named 'cache', sqlx allows:
sqlx.x('cache').insert(rows)
sqlx.x('cache').insert_one(row)
sqlx.x('cache').add(**kws)
sqlx.x('cache').delete(where)
sqlx.x('cache').update(rows, pk_fields)
sqlx.x('cache').update_one(row, pk_cols)
sqlx.x('cache').replace(rows, pk_fields)
sqlx.x('cache').query(where)
sqlx.x('cache').select_one(where)
"""
def __init__(self, conn: DBConn, table, schema, ntRec: namedtuple):
self.conn = conn
self.table = table
self.schema = schema
self.ntRec = ntRec
self.pk_fields = None
self.table_obj = schema + '.' + table if schema else table
self.insert_one = lambda row: self.insert([row])
self.add = lambda **kws: self.insert([self.ntRec(**kws)])
self.update_one = lambda row, pk_cols=None: self.update([row], pk_cols)
self.update_rec=lambda pk_cols=None, **kws: self.update([make_rec(**kws)], pk_cols)
self.replace_one = lambda row, pk_cols=None: self.replace([row], pk_cols)
self.replace_rec=lambda pk_cols=None, **kws: self.replace([make_rec(**kws)], pk_cols)
# self.select_one = lambda where: self.select_one(where, one=True)
def _get_pk(self):
if not self.pk_fields:
pk_rows = self.conn.get_primary_keys(self.table_obj)
self.pk_fields = [r.column_name for r in pk_rows]
return self.pk_fields
def insert(self, data):
return self.conn.insert(self.table_obj, data)
def update(self, data, pk_fields=None):
if not pk_fields:
pk_fields = self._get_pk()
if not pk_fields:
raise Exception("Need Keys to perform UPDATE!")
t_fields = [x.lower() for x in data[0]._fields]
for f in pk_fields:
if not f.lower() in t_fields:
# if keys not provided, need to make sure PK values are provided in data records
raise Exception(
"Value of PK field '{}' must be provided to perform UPDATE!".
format(f))
self.conn.update(self.table_obj, data, pk_fields, echo=False)
def update_one(self, row, pk_cols=None):
self.update([row], pk_cols)
def update_rec(self, pk_cols=None, **kws):
self.update([make_rec(**kws)], pk_cols)
def replace(self, data, pk_fields=None):
if not pk_fields:
pk_fields = self._get_pk()
self.conn.replace(self.table_obj, data, pk_fields, echo=False)
# def replace_rec(self, pk_cols=None, **kws):
# # add default None?
# for field in self.ntRec._fields:
# kws[field] = kws.get(field, None)
# self.replace([self.ntRec(**kws)], pk_cols)
def query(self, where='1=1', one=False, limit=None, as_dict=False):
rows = self.conn.query(
"select * from {} where {}".format(self.table_obj, where),
echo=False,
limit=limit)
rows = rows_to_dicts(rows) if as_dict else rows
if one: return rows[0] if rows else None
else: return rows
def select_one(self, where, field=None, as_dict=False):
row = self.query(where, one=True, as_dict=as_dict)
if field and row:
return row[field] if as_dict else row.__getattribute__(field)
return row
def delete(self, where):
self.conn.execute("delete from {} where {}".format(self.table_obj, where))
def make_sqlx(conn, schema, tables):
"Make sqlx lookup function for given tables"
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
# return table_func_map
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if not obj:
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx
def get_sql_sources(sql_text, echo=False):
"""Obtain the source tables of a query
"""
import sqlparse
# replace "as(" to "as (" # this trips up the sql parser in CTEs
sql_text = re.sub(r"as\(", "as (", sql_text, 0, re.MULTILINE | re.IGNORECASE)
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
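    # Walk the sqlparse token tree: identifiers that follow FROM or JOIN (and are not CTE aliases) are collected as source tables.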
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while not done:
for tok in statement.tokens:
if tok.is_group:
if cte_mode and isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if (last_kw_from or last_kw_join) and last_tok.is_whitespace:
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier) and '(' in tok2.value:
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok2, sqlparse.sql.Identifier) and tok2.normalized.lower() not in cte_aliases:
if echo: log('+Table = ' + tok2.normalized.lower())
sources_dict[tok2.normalized.lower()] = tok.parent
elif isinstance(tok, sqlparse.sql.Identifier) and tok.normalized.lower() not in cte_aliases:
if echo: log('+Table = ' + tok.normalized.lower())
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if tok.is_keyword and tok.normalized == 'WITH':
cte_mode = True
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'GROUP':
last_kw_join = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'WHERE':
last_kw_join = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'ORDER':
last_kw_join = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'CREATE':
cte_mode = True
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'SELECT':
cte_mode = False
last_kw_from = False
elif tok.is_keyword and tok.normalized == 'FROM':
last_kw_from = True
elif tok.is_keyword and 'JOIN' in tok.normalized:
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for s, statement in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table:
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo: log('-CREATE TABLE ' + create_table)
if tok.is_keyword and tok.normalized == 'TABLE' and last_kw_create:
last_kw_create_table = True
if tok.is_keyword and tok.normalized == 'CREATE':
last_kw_create = True
if tok.is_keyword and tok.normalized == 'FROM':
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources
|
[] |
[] |
[
"PROFILE_YAML"
] |
[]
|
["PROFILE_YAML"]
|
python
| 1 | 0 | |
Gachon_Bus_Backend/Gachon_Bus_Backend/wsgi.py
|
"""
WSGI config for Gachon_Bus_Backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Gachon_Bus_Backend.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/ray/serve/utils.py
|
from itertools import groupby
import json
import logging
import pickle
import random
import string
import time
from typing import Iterable, Tuple
import os
import requests
import numpy as np
import pydantic
import ray
import ray.serialization_addons
from ray.util.serialization import StandaloneSerializationContext
from ray.serve.constants import HTTP_PROXY_TIMEOUT
from ray.serve.http_util import build_starlette_request, HTTPRequestWrapper
ACTOR_FAILURE_RETRY_TIMEOUT_S = 60
def parse_request_item(request_item):
if len(request_item.args) == 1:
arg = request_item.args[0]
if request_item.metadata.http_arg_is_pickled:
assert isinstance(arg, bytes)
arg: HTTPRequestWrapper = pickle.loads(arg)
return (build_starlette_request(arg.scope, arg.body), ), {}
return request_item.args, request_item.kwargs
class LoggingContext:
"""
Context manager to manage logging behaviors within a particular block, such as:
1) Overriding logging level
Source (python3 official documentation)
https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging # noqa: E501
"""
def __init__(self, logger, level=None):
self.logger = logger
self.level = level
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
def _get_logger():
logger = logging.getLogger("ray.serve")
# TODO(simon): Make logging level configurable.
log_level = os.environ.get("SERVE_LOG_DEBUG")
if log_level and int(log_level):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
logger = _get_logger()
class ServeEncoder(json.JSONEncoder):
"""Ray.Serve's utility JSON encoder. Adds support for:
- bytes
- Pydantic types
- Exceptions
- numpy.ndarray
"""
def default(self, o): # pylint: disable=E0202
if isinstance(o, bytes):
return o.decode("utf-8")
if isinstance(o, pydantic.BaseModel):
return o.dict()
if isinstance(o, Exception):
return str(o)
if isinstance(o, np.ndarray):
if o.dtype.kind == "f": # floats
o = o.astype(float)
if o.dtype.kind in {"i", "u"}: # signed and unsigned integers.
o = o.astype(int)
return o.tolist()
return super().default(o)
@ray.remote(num_cpus=0)
def block_until_http_ready(http_endpoint,
backoff_time_s=1,
check_ready=None,
timeout=HTTP_PROXY_TIMEOUT):
http_is_ready = False
start_time = time.time()
while not http_is_ready:
try:
resp = requests.get(http_endpoint)
assert resp.status_code == 200
if check_ready is None:
http_is_ready = True
else:
http_is_ready = check_ready(resp)
except Exception:
pass
if 0 < timeout < time.time() - start_time:
raise TimeoutError(
"HTTP proxy not ready after {} seconds.".format(timeout))
time.sleep(backoff_time_s)
def get_random_letters(length=6):
return "".join(random.choices(string.ascii_letters, k=length))
def format_actor_name(actor_name, controller_name=None, *modifiers):
if controller_name is None:
name = actor_name
else:
name = "{}:{}".format(controller_name, actor_name)
for modifier in modifiers:
name += "-{}".format(modifier)
return name
def get_all_node_ids():
"""Get IDs for all nodes in the cluster.
Handles multiple nodes on the same IP by appending an index to the
node_id, e.g., 'node_id-index'.
Returns a list of ('node_id-index', 'node_id') tuples (the latter can be
used as a resource requirement for actor placements).
"""
node_ids = []
# We need to use the node_id and index here because we could
# have multiple virtual nodes on the same host. In that case
# they will have the same IP and therefore node_id.
for _, node_id_group in groupby(sorted(ray.state.node_ids())):
for index, node_id in enumerate(node_id_group):
node_ids.append(("{}-{}".format(node_id, index), node_id))
return node_ids
def get_node_id_for_actor(actor_handle):
"""Given an actor handle, return the node id it's placed on."""
return ray.state.actors()[actor_handle._actor_id.hex()]["Address"][
"NodeID"]
def compute_iterable_delta(old: Iterable,
new: Iterable) -> Tuple[set, set, set]:
"""Given two iterables, return the entries that's (added, removed, updated).
Usage:
>>> old = {"a", "b"}
>>> new = {"a", "d"}
>>> compute_iterable_delta(old, new)
({"d"}, {"b"}, {"a"})
"""
old_keys, new_keys = set(old), set(new)
added_keys = new_keys - old_keys
removed_keys = old_keys - new_keys
updated_keys = old_keys.intersection(new_keys)
return added_keys, removed_keys, updated_keys
def compute_dict_delta(old_dict, new_dict) -> Tuple[dict, dict, dict]:
"""Given two dicts, return the entries that's (added, removed, updated).
Usage:
>>> old = {"a": 1, "b": 2}
>>> new = {"a": 3, "d": 4}
>>> compute_dict_delta(old, new)
({"d": 4}, {"b": 2}, {"a": 3})
"""
added_keys, removed_keys, updated_keys = compute_iterable_delta(
old_dict.keys(), new_dict.keys())
return (
{k: new_dict[k]
for k in added_keys},
{k: old_dict[k]
for k in removed_keys},
{k: new_dict[k]
for k in updated_keys},
)
def get_current_node_resource_key() -> str:
"""Get the Ray resource key for current node.
It can be used for actor placement.
"""
current_node_id = ray.get_runtime_context().node_id.hex()
for node in ray.nodes():
if node["NodeID"] == current_node_id:
# Found the node.
for key in node["Resources"].keys():
if key.startswith("node:"):
return key
else:
raise ValueError("Cannot found the node dictionary for current node.")
def ensure_serialization_context():
"""Ensure the serialization addons on registered, even when Ray has not
been started."""
ctx = StandaloneSerializationContext()
ray.serialization_addons.apply(ctx)
|
[] |
[] |
[
"SERVE_LOG_DEBUG"
] |
[]
|
["SERVE_LOG_DEBUG"]
|
python
| 1 | 0 | |
test/unit/models/test_proposals.py
|
# -*- coding: utf-8 -*-
import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
# clear DB tables before each execution
def setup():
# clear tables first
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
# list of proposal govobjs to import for testing
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://dashcentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://dashcentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
# Proposal
@pytest.fixture
def proposal():
# NOTE: no governance_object_id is set
pobj = Proposal(
start_epoch=1483250400, # 2017-01-01
end_epoch=2122520400,
name="wine-n-cheeze-party",
url="https://zoccentral.com/wine-n-cheeze-party",
payment_address="nFr5So8SVuREU8bghTF4eZewiMuKaZ4M2t",
payment_amount=13
)
# NOTE: this object is (intentionally) not saved yet.
# We want to return an built, but unsaved, object
return pobj
def test_proposal_is_valid(proposal):
from zerooned import ZeroOneDaemon
import zeroonelib
zerooned = ZeroOneDaemon.from_zeroone_conf(config.zeroone_conf)
orig = Proposal(**proposal.get_dict()) # make a copy
# fixture as-is should be valid
assert proposal.is_valid() is True
# ============================================================
# ensure end_date not greater than start_date
# ============================================================
proposal.end_epoch = proposal.start_epoch
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch - 1
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 0
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 1
assert proposal.is_valid() is True
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid proposal name
# ============================================================
proposal.name = ' heya!@209h '
assert proposal.is_valid() is False
proposal.name = "anything' OR 'x'='x"
assert proposal.is_valid() is False
proposal.name = ' '
assert proposal.is_valid() is False
proposal.name = ''
assert proposal.is_valid() is False
proposal.name = '0'
assert proposal.is_valid() is True
proposal.name = 'R66-Y'
assert proposal.is_valid() is True
proposal.name = 'valid-name'
assert proposal.is_valid() is True
proposal.name = ' mostly-valid-name'
assert proposal.is_valid() is False
proposal.name = 'also-mostly-valid-name '
assert proposal.is_valid() is False
proposal.name = ' similarly-kinda-valid-name '
assert proposal.is_valid() is False
proposal.name = 'dean miller 5493'
assert proposal.is_valid() is False
proposal.name = 'dean-millerà-5493'
assert proposal.is_valid() is False
proposal.name = 'dean-миллер-5493'
assert proposal.is_valid() is False
# binary gibberish
proposal.name = zeroonelib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid payment address
# ============================================================
proposal.payment_address = '7'
assert proposal.is_valid() is False
proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
assert proposal.is_valid() is False
proposal.payment_address = '221 B Baker St., London, United Kingdom'
assert proposal.is_valid() is False
# this is actually the Zoc foundation multisig address...
proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
assert proposal.is_valid() is False
proposal.payment_address = 'n8XvLiyPRh5YeLEtjHwzYcDYwnyt1WX7ch'
assert proposal.is_valid() is True
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is False
proposal.payment_address = ' n8XvLiyPRh5YeLEtjHwzYcDYwnyt1WX7ch'
assert proposal.is_valid() is False
proposal.payment_address = 'n8XvLiyPRh5YeLEtjHwzYcDYwnyt1WX7ch '
assert proposal.is_valid() is False
proposal.payment_address = ' n8XvLiyPRh5YeLEtjHwzYcDYwnyt1WX7ch '
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# validate URL
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv'
assert proposal.is_valid() is True
proposal.url = ' http://bit.ly/1e1EYJv'
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv '
assert proposal.is_valid() is False
proposal.url = ' http://bit.ly/1e1EYJv '
assert proposal.is_valid() is False
proposal.url = 'http://::12.34.56.78]/'
assert proposal.is_valid() is False
proposal.url = 'http://[::1/foo/bad]/bad'
assert proposal.is_valid() is False
proposal.url = 'http://zoccentral.org/dean-miller 5493'
assert proposal.is_valid() is False
proposal.url = 'http://zoccentralisé.org/dean-miller-5493'
assert proposal.is_valid() is True
proposal.url = 'http://zoccentralisé.org/dean-миллер-5493'
assert proposal.is_valid() is True
proposal.url = 'https://example.com/resource.ext?param=1&other=2'
assert proposal.is_valid() is True
proposal.url = 'www.com'
assert proposal.is_valid() is True
proposal.url = 'v.ht/'
assert proposal.is_valid() is True
proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
assert proposal.is_valid() is True
proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
assert proposal.is_valid() is True
proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
assert proposal.is_valid() is True
# gibberish URL
    proposal.url = zeroonelib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c75303165615c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c75303233645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure proposal can't request negative zoc
# ============================================================
proposal.payment_amount = -1
assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
cycle = 24 # testnet
now = misc.now()
proposal.start_epoch = now - (86400 * 2) # two days ago
proposal.end_epoch = now - (60 * 60) # expired one hour ago
assert proposal.is_expired(superblockcycle=cycle) is False
# fudge factor + a 24-block cycle == an expiry window of 9086, so...
proposal.end_epoch = now - 9085
assert proposal.is_expired(superblockcycle=cycle) is False
proposal.end_epoch = now - 9087
assert proposal.is_expired(superblockcycle=cycle) is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
from zerooned import ZeroOneDaemon
zerooned = ZeroOneDaemon.from_zeroone_conf(config.zeroone_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_zerooned(zerooned, item)
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
#assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
#assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
print prop_list
def test_proposal_size(proposal):
orig = Proposal(**proposal.get_dict()) # make a copy
proposal.url = 'https://testurl.com/'
proposal_length_bytes = len(proposal.serialise()) // 2
# how much space is available in the Proposal
extra_bytes = (Proposal.MAX_DATA_SIZE - proposal_length_bytes)
# fill URL field with max remaining space
proposal.url = proposal.url + ('x' * extra_bytes)
# ensure this is the max proposal size and is valid
assert (len(proposal.serialise()) // 2) == Proposal.MAX_DATA_SIZE
assert proposal.is_valid() is True
# add one more character to URL, Proposal should now be invalid
proposal.url = proposal.url + 'x'
assert (len(proposal.serialise()) // 2) == (Proposal.MAX_DATA_SIZE + 1)
assert proposal.is_valid() is False
|
[] |
[] |
[
"SENTINEL_ENV",
"SENTINEL_CONFIG"
] |
[]
|
["SENTINEL_ENV", "SENTINEL_CONFIG"]
|
python
| 2 | 0 | |
doc/v2/howto/cluster/src/word2vec/api_train_v2_cluster.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import paddle.v2 as paddle
import pickle
embsize = 32
hiddensize = 256
N = 5
cluster_train_file = "./train_data_dir/train/train.txt"
cluster_test_file = "./test_data_dir/test/test.txt"
node_id = os.getenv("OMPI_COMM_WORLD_RANK")
if not node_id:
raise EnvironmentError("must provied OMPI_COMM_WORLD_RANK")
def wordemb(inlayer):
wordemb = paddle.layer.embedding(
input=inlayer,
size=embsize,
param_attr=paddle.attr.Param(
name="_proj",
initial_std=0.001,
learning_rate=1,
l2_rate=0,
sparse_update=True))
return wordemb
def cluster_reader_cluster(filename, node_id):
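    # Each trainer node reads only its own shard, e.g. train.txt-00000 for node id 0.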
def cluster_reader():
with open("-".join([filename, "%05d" % int(node_id)]), "r") as f:
for l in f:
csv_data = [int(cell) for cell in l.split(",")]
yield tuple(csv_data)
return cluster_reader
def main():
# get arguments from env
# for local training
TRUTH = ["true", "True", "TRUE", "1", "yes", "Yes", "YES"]
cluster_train = os.getenv('PADDLE_CLUSTER_TRAIN', "False") in TRUTH
use_gpu = os.getenv('PADDLE_INIT_USE_GPU', "False")
if not cluster_train:
paddle.init(
use_gpu=use_gpu,
trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")))
else:
paddle.init(
use_gpu=use_gpu,
trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")),
port=int(os.getenv("PADDLE_INIT_PORT", "7164")),
ports_num=int(os.getenv("PADDLE_INIT_PORTS_NUM", "1")),
ports_num_for_sparse=int(
os.getenv("PADDLE_INIT_PORTS_NUM_FOR_SPARSE", "1")),
num_gradient_servers=int(
os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "1")),
trainer_id=int(os.getenv("PADDLE_INIT_TRAINER_ID", "0")),
pservers=os.getenv("PADDLE_INIT_PSERVERS", "127.0.0.1"))
fn = open("thirdparty/wuyi_train_thdpty/word_dict.pickle", "r")
word_dict = pickle.load(fn)
fn.close()
dict_size = len(word_dict)
firstword = paddle.layer.data(
name="firstw", type=paddle.data_type.integer_value(dict_size))
secondword = paddle.layer.data(
name="secondw", type=paddle.data_type.integer_value(dict_size))
thirdword = paddle.layer.data(
name="thirdw", type=paddle.data_type.integer_value(dict_size))
fourthword = paddle.layer.data(
name="fourthw", type=paddle.data_type.integer_value(dict_size))
nextword = paddle.layer.data(
name="fifthw", type=paddle.data_type.integer_value(dict_size))
Efirst = wordemb(firstword)
Esecond = wordemb(secondword)
Ethird = wordemb(thirdword)
Efourth = wordemb(fourthword)
contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
hidden1 = paddle.layer.fc(input=contextemb,
size=hiddensize,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embsize * 8),
learning_rate=1))
predictword = paddle.layer.fc(input=hidden1,
size=dict_size,
bias_attr=paddle.attr.Param(learning_rate=2),
act=paddle.activation.Softmax())
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
result = trainer.test(
paddle.batch(
cluster_reader_cluster(cluster_test_file, node_id), 32))
print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics,
result.metrics)
cost = paddle.layer.classification_cost(input=predictword, label=nextword)
parameters = paddle.parameters.create(cost)
adagrad = paddle.optimizer.AdaGrad(
learning_rate=3e-3,
regularization=paddle.optimizer.L2Regularization(8e-4))
trainer = paddle.trainer.SGD(cost,
parameters,
adagrad,
is_local=not cluster_train)
trainer.train(
paddle.batch(cluster_reader_cluster(cluster_train_file, node_id), 32),
num_passes=30,
event_handler=event_handler)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"PADDLE_INIT_TRAINER_COUNT",
"PADDLE_INIT_PSERVERS",
"PADDLE_INIT_PORTS_NUM_FOR_SPARSE",
"PADDLE_INIT_USE_GPU",
"PADDLE_INIT_PORTS_NUM",
"PADDLE_INIT_PORT",
"PADDLE_INIT_TRAINER_ID",
"OMPI_COMM_WORLD_RANK",
"PADDLE_CLUSTER_TRAIN",
"PADDLE_INIT_NUM_GRADIENT_SERVERS"
] |
[]
|
["PADDLE_INIT_TRAINER_COUNT", "PADDLE_INIT_PSERVERS", "PADDLE_INIT_PORTS_NUM_FOR_SPARSE", "PADDLE_INIT_USE_GPU", "PADDLE_INIT_PORTS_NUM", "PADDLE_INIT_PORT", "PADDLE_INIT_TRAINER_ID", "OMPI_COMM_WORLD_RANK", "PADDLE_CLUSTER_TRAIN", "PADDLE_INIT_NUM_GRADIENT_SERVERS"]
|
python
| 10 | 0 | |
traffic-deprecated.py
|
#!/usr/bin/python3
import threading
import requests
import time, re, os
import random
from stem import Signal
from stem.control import Controller
from bs4 import BeautifulSoup
# from dotenv import load_dotenv
# load_dotenv()
TOR_KEY = os.getenv('TOR')
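# Tor ControlPort password, used below to authenticate when requesting a new exit circuit.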
#Create list of sites to run requests against from ./data/sites.txt
def site_list():
with open('./data/sites.txt') as file:
sites = [site.strip() for site in file]
return sites
#This function serves to get user_agents from text file and transform it to a list
def user_agents_list():
with open('./data/user-agents.txt') as file:
        user_agents = [user_agent.strip() for user_agent in file]  # strip the trailing newline from each entry
return user_agents #new list of user_agents in list format
# def pick_user_agent():
# user_agent = random.choice(get_user_agents)
# return user_agent
def get_request(site):
session = requests.session()
# TO Request URL with SOCKS over TOR
session.proxies = {}
session.proxies['http']='socks5h://localhost:9050'
session.proxies['https']='socks5h://localhost:9050'
#website = str(input("Enter the website > "))
try:
# user_agent = pick_user_agent()
headers = {'User-Agent':user_agent}
url = "http://"+site
r = session.get(url, headers=headers)
except Exception as e:
print (str(e))
else:
return session.cookies
def use_requests(site):
for i in range(5):
result = get_request(site)
print ("\n Session Cookie is " + str(result))
switch_ip()
time.sleep(5)
return
#This is the controller responsible for rotating exit IP Address
def switch_ip():
with Controller.from_port(port = 9051) as controller:
controller.authenticate(password=TOR_KEY)
controller.signal(Signal.NEWNYM)
controller.close()
return
def start_requests(site):
print ("Starting request for " +site)
use_requests(site)
return
#Creating multiple threads to scale the events generated
def load_threading():
    r1 = threading.Thread(target=start_requests, args=(site,))
# r1 = threading.Thread(target=start_requests, args=(random.choice(sites), 0))
# r2 = threading.Thread(target=start_requests, args=(random.choice(sites), 0))
# r3 = threading.Thread(target=start_requests, args=(random.choice(sites), 0))
# r4 = threading.Thread(target=start_requests, args=(random.choice(sites), 0))
try:
r1.start()
# r2.start()
# r3.start()
# r4.start()
except Exception as e:
print (e)
return
if __name__ == "__main__":
sites = site_list()
user_agents = user_agents_list()
while True:
site = random.choice(sites)
user_agent = random.choice(user_agents)
print (site + " " + user_agent)
time.sleep(3)
# load_threading(site, user_agent)
|
[] |
[] |
[
"TOR"
] |
[]
|
["TOR"]
|
python
| 1 | 0 | |
adminapi/request.py
|
"""Serveradmin - adminapi
Copyright (c) 2019 InnoGames GmbH
"""
import os
from hashlib import sha1
import hmac
from http.client import IncompleteRead
from socket import timeout
from ssl import SSLError
import time
import json
from base64 import b64encode
from datetime import datetime, timezone
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from paramiko.agent import Agent
from paramiko.message import Message
from paramiko.ssh_exception import SSHException, PasswordRequiredException
from adminapi import VERSION
try:
from paramiko import RSAKey, ECDSAKey, Ed25519Key
key_classes = (RSAKey, ECDSAKey, Ed25519Key)
except ImportError:
# Ed25519Key requires paramiko >= 2.2
from paramiko import RSAKey, ECDSAKey
key_classes = (RSAKey, ECDSAKey)
from adminapi.cmduser import get_auth_token
from adminapi.filters import BaseFilter
from adminapi.exceptions import (
ApiError,
AuthenticationError,
ConfigurationError,
)
def load_private_key_file(private_key_path):
"""Try to load a private ssh key from disk
We support RSA, ECDSA and Ed25519 keys and return instances of:
* paramiko.rsakey.RSAKey
* paramiko.ecdsakey.ECDSAKey
* paramiko.ed25519key.Ed25519Key (requires paramiko >= 2.2)
"""
# I don't think there is a key type independent way of doing this
for key_class in key_classes:
try:
return key_class.from_private_key_file(private_key_path)
except PasswordRequiredException as e:
raise AuthenticationError(e)
except SSHException:
continue
raise AuthenticationError('Loading private key failed')
class Settings:
base_url = os.environ.get('SERVERADMIN_BASE_URL')
auth_key_path = os.environ.get('SERVERADMIN_KEY_PATH')
auth_key = load_private_key_file(auth_key_path) if auth_key_path else None
auth_token = os.environ.get('SERVERADMIN_TOKEN') or get_auth_token()
timeout = 60.0
tries = 3
sleep_interval = 5
def calc_message(timestamp, data=None):
return str(timestamp) + (':' + data) if data else str(timestamp)
def signable(key):
"""Checks if key is able to sign the message
"""
try:
key.sign_ssh_data(b'')
return True
except SSHException:
return False
def calc_signature(private_key, timestamp, data=None):
"""Create a proof that we posess the private key
Use paramikos sign_ssh_data to sign the request body together with a
timestamp. As we send the signature and the timestamp in each request,
serveradmin can use the public key to check if we have the private key.
The timestamp is used to prevent a MITM to replay this request over and
over again. Unfortunately an attacker will still be able to replay this
message for the grace period serveradmin requires the timestamp to be in.
    We can't prevent this without asking serveradmin for a nonce before every
request.
Returns the signature as base64 encoded unicode, ready for transport.
"""
message = calc_message(timestamp, data)
sig = private_key.sign_ssh_data(message.encode())
if isinstance(sig, Message):
# sign_ssh_data returns bytes for agent keys but a Message instance
# for keys loaded from a file. Fix the file loaded once:
sig = sig.asbytes()
return b64encode(sig).decode()
def calc_security_token(auth_token, timestamp, data=None):
message = calc_message(timestamp, data)
return hmac.new(
auth_token.encode('utf8'), message.encode('utf8'), sha1
).hexdigest()
def calc_app_id(auth_token):
return sha1(auth_token.encode('utf8')).hexdigest()
def send_request(endpoint, get_params=None, post_params=None):
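    # Retry transient failures up to Settings.tries times, sleeping between attempts.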
for retry in reversed(range(Settings.tries)):
request = _build_request(endpoint, get_params, post_params)
response = _try_request(request, retry)
if response:
break
# In case of an error, sleep before trying again
time.sleep(Settings.sleep_interval)
else:
assert False # Cannot happen
return json.loads(response.read().decode())
def _build_request(endpoint, get_params, post_params):
"""Wrap request data in an urllib Request instance
Aside from preparing the get and post data for transport, this function
authenticates the request using either an auth token or ssh keys.
Returns an urllib Request.
"""
if post_params:
post_data = json.dumps(post_params, default=json_encode_extra)
else:
post_data = None
timestamp = int(time.time())
headers = {
'Content-Encoding': 'application/x-json',
'X-Timestamp': str(timestamp),
'X-API-Version': '.'.join(str(v) for v in VERSION),
}
if Settings.auth_key:
headers['X-PublicKeys'] = Settings.auth_key.get_base64()
headers['X-Signatures'] = calc_signature(
Settings.auth_key, timestamp, post_data
)
elif Settings.auth_token:
headers['X-Application'] = calc_app_id(Settings.auth_token)
headers['X-SecurityToken'] = calc_security_token(
Settings.auth_token, timestamp, post_data
)
else:
try:
agent = Agent()
            agent_keys = list(filter(signable, agent.get_keys()))
except SSHException:
raise AuthenticationError('No token and ssh agent found')
if not agent_keys:
raise AuthenticationError('No token and ssh agent keys found')
key_signatures = {
key.get_base64(): calc_signature(key, timestamp, post_data)
for key in agent_keys
}
headers['X-PublicKeys'] = ','.join(key_signatures.keys())
headers['X-Signatures'] = ','.join(key_signatures.values())
if not Settings.base_url:
raise ConfigurationError(
'Environment variable SERVERADMIN_BASE_URL not set'
)
url = Settings.base_url + endpoint
if get_params:
url += '?' + urlencode(get_params)
if post_data:
post_data = post_data.encode('utf8')
return Request(url, post_data, headers)
def _try_request(request, retry=False):
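    # "retry" is the number of attempts left; returning None tells send_request to sleep and try again.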
try:
return urlopen(request, timeout=Settings.timeout)
except HTTPError as error:
if error.code >= 500:
if retry:
return None
elif error.code >= 400:
content_type = error.info()['Content-Type']
message = str(error)
if content_type == 'application/x-json':
payload = json.loads(error.read().decode())
message = payload['error']['message']
raise ApiError(message, status_code=error.code)
raise
except (SSLError, URLError, timeout, IncompleteRead):
if retry:
return None
raise
def json_encode_extra(obj):
if isinstance(obj, BaseFilter):
return obj.serialize()
if isinstance(obj, datetime):
# Assume naive datetime objects passed in are in UTC. This makes sense
# for python as even datetime.datetime.utcnow() returns naive datetimes
if obj.tzinfo is None:
obj = obj.replace(tzinfo=timezone.utc)
return obj.astimezone(timezone.utc).strftime('%Y-%m-%d %H:%M:%S%z')
if isinstance(obj, set):
return list(obj)
return str(obj)
|
[] |
[] |
[
"SERVERADMIN_KEY_PATH",
"SERVERADMIN_TOKEN",
"SERVERADMIN_BASE_URL"
] |
[]
|
["SERVERADMIN_KEY_PATH", "SERVERADMIN_TOKEN", "SERVERADMIN_BASE_URL"]
|
python
| 3 | 0 | |
common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.conf;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.type.TimestampTZUtil;
import org.apache.hadoop.hive.common.ZooKeeperHiveHelper;
import org.apache.hadoop.hive.conf.Validator.PatternSet;
import org.apache.hadoop.hive.conf.Validator.RangeValidator;
import org.apache.hadoop.hive.conf.Validator.RatioValidator;
import org.apache.hadoop.hive.conf.Validator.SizeValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
import org.apache.hadoop.hive.conf.Validator.TimeValidator;
import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.common.HiveCompat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.LoginException;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URL;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Hive Configuration.
*/
public class HiveConf extends Configuration {
protected String hiveJar;
protected Properties origProp;
protected String auxJars;
private static final Logger LOG = LoggerFactory.getLogger(HiveConf.class);
private static boolean loadMetastoreConfig = false;
private static boolean loadHiveServer2Config = false;
private static URL hiveDefaultURL = null;
private static URL hiveSiteURL = null;
private static URL hivemetastoreSiteUrl = null;
private static URL hiveServer2SiteUrl = null;
private static byte[] confVarByteArray = null;
private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
private final List<String> restrictList = new ArrayList<String>();
private final Set<String> hiddenSet = new HashSet<String>();
private final List<String> rscList = new ArrayList<>();
private Pattern modWhiteListPattern = null;
private volatile boolean isSparkConfigUpdated = false;
private static final int LOG_PREFIX_LENGTH = 64;
public boolean getSparkConfigUpdated() {
return isSparkConfigUpdated;
}
public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
this.isSparkConfigUpdated = isSparkConfigUpdated;
}
public interface EncoderDecoder<K, V> {
V encode(K key);
K decode(V value);
}
public static class URLEncoderDecoder implements EncoderDecoder<String, String> {
@Override
public String encode(String key) {
try {
return URLEncoder.encode(key, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
return key;
}
}
@Override
public String decode(String value) {
try {
return URLDecoder.decode(value, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
return value;
}
}
}
public static class EncoderDecoderFactory {
public static final URLEncoderDecoder URL_ENCODER_DECODER = new URLEncoderDecoder();
}
static {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader == null) {
classLoader = HiveConf.class.getClassLoader();
}
hiveDefaultURL = classLoader.getResource("hive-default.xml");
// Look for hive-site.xml on the CLASSPATH and log its location if found.
hiveSiteURL = findConfigFile(classLoader, "hive-site.xml", true);
hivemetastoreSiteUrl = findConfigFile(classLoader, "hivemetastore-site.xml", false);
hiveServer2SiteUrl = findConfigFile(classLoader, "hiveserver2-site.xml", false);
for (ConfVars confVar : ConfVars.values()) {
vars.put(confVar.varname, confVar);
}
Set<String> llapDaemonConfVarsSetLocal = new LinkedHashSet<>();
populateLlapDaemonVarsSet(llapDaemonConfVarsSetLocal);
llapDaemonVarsSet = Collections.unmodifiableSet(llapDaemonConfVarsSetLocal);
}
private static URL findConfigFile(ClassLoader classLoader, String name, boolean doLog) {
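    // Search order: classpath, then $HIVE_CONF_DIR, then $HIVE_HOME/conf, then the conf directory next to the hive jar.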
URL result = classLoader.getResource(name);
if (result == null) {
String confPath = System.getenv("HIVE_CONF_DIR");
result = checkConfigFile(new File(confPath, name));
if (result == null) {
String homePath = System.getenv("HIVE_HOME");
String nameInConf = "conf" + File.separator + name;
result = checkConfigFile(new File(homePath, nameInConf));
if (result == null) {
URI jarUri = null;
try {
// Handle both file:// and jar:<url>!{entry} in the case of shaded hive libs
URL sourceUrl = HiveConf.class.getProtectionDomain().getCodeSource().getLocation();
jarUri = sourceUrl.getProtocol().equalsIgnoreCase("jar") ? new URI(sourceUrl.getPath()) : sourceUrl.toURI();
} catch (Throwable e) {
LOG.info("Cannot get jar URI", e);
System.err.println("Cannot get jar URI: " + e.getMessage());
}
// From the jar file, the parent is /lib folder
File parent = new File(jarUri).getParentFile();
if (parent != null) {
result = checkConfigFile(new File(parent.getParentFile(), nameInConf));
}
}
}
}
if (doLog) {
LOG.info("Found configuration file {}", result);
}
return result;
}
private static URL checkConfigFile(File f) {
try {
return (f.exists() && f.isFile()) ? f.toURI().toURL() : null;
} catch (Throwable e) {
LOG.info("Error looking for config {}", f, e);
System.err.println("Error looking for config " + f + ": " + e.getMessage());
return null;
}
}
@InterfaceAudience.Private
public static final String PREFIX_LLAP = "llap.";
@InterfaceAudience.Private
public static final String PREFIX_HIVE_LLAP = "hive.llap.";
/**
* Metastore related options that the db is initialized against. When a conf
 * var in this list is changed, the metastore instance for the CLI will
* be recreated so that the change will take effect.
*/
public static final HiveConf.ConfVars[] metaVars = {
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.REPLDIR,
HiveConf.ConfVars.METASTOREURIS,
HiveConf.ConfVars.METASTORESELECTION,
HiveConf.ConfVars.METASTORE_SERVER_PORT,
HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
HiveConf.ConfVars.METASTOREPWD,
HiveConf.ConfVars.METASTORECONNECTURLHOOK,
HiveConf.ConfVars.METASTORECONNECTURLKEY,
HiveConf.ConfVars.METASTORESERVERMINTHREADS,
HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE,
HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
HiveConf.ConfVars.METASTORE_DATANUCLEUS_INIT_COL_INFO,
HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL,
HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
HiveConf.ConfVars.METASTORE_EVENT_MESSAGE_FACTORY,
HiveConf.ConfVars.METASTORE_FILTER_HOOK,
HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX,
HiveConf.ConfVars.METASTORE_INIT_HOOKS,
HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
HiveConf.ConfVars.HMSHANDLERATTEMPTS,
HiveConf.ConfVars.HMSHANDLERINTERVAL,
HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
HiveConf.ConfVars.HIVE_TXN_MANAGER,
HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES,
HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
HiveConf.ConfVars.METASTORE_FASTPATH,
HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS,
HiveConf.ConfVars.METASTORE_WM_DEFAULT_POOL_SIZE
};
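// Illustrative sketch, not part of the original source: metaVars is the list consulted when
// deciding whether a changed property invalidates the CLI's metastore connection. A hypothetical
// helper (the name isMetaVar is an assumption for illustration) could look roughly like:
//
//   static boolean isMetaVar(String name) {
//     for (ConfVars v : metaVars) {
//       if (v.varname.equals(name)) {
//         return true;   // the CLI should recreate its metastore instance
//       }
//     }
//     return false;
//   }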
/**
* User configurable Metastore vars
*/
public static final HiveConf.ConfVars[] metaConfVars = {
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_CAPABILITY_CHECK,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
};
static {
for (ConfVars confVar : metaConfVars) {
metaConfs.put(confVar.varname, confVar);
}
}
public static final String HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME = "hive.llap.daemon.service.principal";
public static final String HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME =
"hive.server2.authentication.ldap.userMembershipKey";
public static final String HIVE_SPARK_SUBMIT_CLIENT = "spark-submit";
public static final String HIVE_SPARK_LAUNCHER_CLIENT = "spark-launcher";
/**
* dbVars are the parameters can be set per database. If these
* parameters are set as a database property, when switching to that
* database, the HiveConf variable will be changed. The change of these
* parameters will effectively change the DFS and MapReduce clusters
* for different databases.
*/
public static final HiveConf.ConfVars[] dbVars = {
HiveConf.ConfVars.HADOOPBIN,
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.SCRATCHDIR
};
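// Illustrative sketch, not part of the original source: when the current database defines one of
// the dbVars above as a database property, session-level code can overlay it onto its HiveConf.
// Here dbParams stands for a hypothetical Map<String, String> of database properties:
//
//   for (ConfVars v : dbVars) {
//     String dbLevelValue = dbParams.get(v.varname);
//     if (dbLevelValue != null) {
//       conf.setVar(v, dbLevelValue);   // conf: the session's HiveConf
//     }
//   }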
/**
 * Parameter values that are stored URL-encoded; use EncoderDecoderFactory.URL_ENCODER_DECODER to get the decoded string.
*/
public static final HiveConf.ConfVars[] ENCODED_CONF = {
ConfVars.HIVEQUERYSTRING
};
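// Illustrative sketch, not part of the original source: values of the ENCODED_CONF variables are
// stored URL-encoded, so a caller would typically decode them via the factory above, e.g.:
//
//   String encoded = conf.getVar(ConfVars.HIVEQUERYSTRING);
//   String query = EncoderDecoderFactory.URL_ENCODER_DECODER.decode(encoded);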
/**
* Variables used by LLAP daemons.
* TODO: Eventually auto-populate this based on prefixes. The conf variables
* will need to be renamed for this.
*/
private static final Set<String> llapDaemonVarsSet;
private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal) {
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MODE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_ARENA_COUNT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_DIRECT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_USE_LRFU.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_LRFU_LAMBDA.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_USE_FILEID_PATH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_PRINCIPAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_KEYTAB_FILE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_ZK_CONNECTION_STRING.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_WEB_AUTO_AUTH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WORK_DIRS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_XMX_HEADROOM.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_REFRESH_INTERVAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_SSL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_VALIDATE_ACLS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_LOGGER.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_USE_FQDN.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_OUTPUT_FORMAT_ARROW.varname);
}
/**
 * Get a set containing configuration parameter names used by LLAP Server instances
* @return an unmodifiable set containing llap ConfVars
*/
public static final Set<String> getLlapDaemonConfVars() {
return llapDaemonVarsSet;
}
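// Illustrative sketch, not part of the original source: the set above can be used to project a
// full configuration down to only the daemon-relevant entries, roughly:
//
//   Map<String, String> llapOnly = new HashMap<>();
//   for (String key : HiveConf.getLlapDaemonConfVars()) {
//     String value = conf.get(key);   // conf: any Hadoop Configuration / HiveConf instance
//     if (value != null) {
//       llapOnly.put(key, value);
//     }
//   }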
/**
* ConfVars.
*
* These are the default configuration properties for Hive. Each HiveConf
* object is initialized as follows:
*
* 1) Hadoop configuration properties are applied.
 * 2) ConfVar properties with non-null values are overlaid.
 * 3) hive-site.xml properties are overlaid.
 * 4) System Properties and Manual Overrides are overlaid.
*
* WARNING: think twice before adding any Hadoop configuration properties
* with non-null values to this list as they will override any values defined
* in the underlying Hadoop configuration.
*/
public static enum ConfVars {
// QL execution stuff
SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
PLAN("hive.exec.plan", "", ""),
STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
"Directory name that will be created inside table locations in order to support HDFS encryption. " +
"This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
"In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
"HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
"For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
"with ${hive.scratch.dir.permission}."),
REPLDIR("hive.repl.rootdir","/user/${system:user.name}/repl/",
"HDFS root dir for all replication dumps."),
REPLCMENABLED("hive.repl.cm.enabled", false,
"Turn on ChangeManager, so delete files will go to cmrootdir."),
REPLCMDIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/",
"Root dir for ChangeManager, used for deleted files."),
REPLCMRETIAN("hive.repl.cm.retain","24h",
new TimeValidator(TimeUnit.HOURS),
"Time to retain removed files in cmrootdir."),
REPLCMINTERVAL("hive.repl.cm.interval","3600s",
new TimeValidator(TimeUnit.SECONDS),
"Inteval for cmroot cleanup thread."),
REPL_FUNCTIONS_ROOT_DIR("hive.repl.replica.functions.root.dir","/user/${system:user.name}/repl/functions/",
"Root directory on the replica warehouse where the repl sub-system will store jars from the primary warehouse"),
REPL_APPROX_MAX_LOAD_TASKS("hive.repl.approx.max.load.tasks", 10000,
"Provide an approximation of the maximum number of tasks that should be executed before \n"
+ "dynamically generating the next set of tasks. The number is approximate as Hive \n"
+ "will stop at a slightly higher number, the reason being some events might lead to a \n"
+ "task increment that would cross the specified limit."),
REPL_PARTITIONS_DUMP_PARALLELISM("hive.repl.partitions.dump.parallelism",100,
"Number of threads that will be used to dump partition data information during repl dump."),
REPL_DUMPDIR_CLEAN_FREQ("hive.repl.dumpdir.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired dump dirs."),
REPL_DUMPDIR_TTL("hive.repl.dumpdir.ttl", "7d",
new TimeValidator(TimeUnit.DAYS),
"TTL of dump dirs before cleanup."),
REPL_DUMP_METADATA_ONLY("hive.repl.dump.metadata.only", false,
"Indicates whether replication dump only metadata information or data + metadata. \n"
+ "This config makes hive.repl.include.external.tables config ineffective."),
REPL_BOOTSTRAP_ACID_TABLES("hive.repl.bootstrap.acid.tables", false,
"Indicates if repl dump should bootstrap the information about ACID tables along with \n"
+ "incremental dump for replication. It is recommended to keep this config parameter \n"
+ "as false always and should be set to true only via WITH clause of REPL DUMP \n"
+ "command. It should be set to true only once for incremental repl dump on \n"
+ "each of the existing replication policies after enabling acid tables replication."),
REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT("hive.repl.bootstrap.dump.open.txn.timeout", "1h",
new TimeValidator(TimeUnit.HOURS),
"Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. "
+ "If these open transactions are not closed within the timeout value, then REPL DUMP will "
+ "forcefully abort those transactions and continue with bootstrap dump."),
//https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Running_as_the_superuser
REPL_ADD_RAW_RESERVED_NAMESPACE("hive.repl.add.raw.reserved.namespace", false,
"For TDE with same encryption keys on source and target, allow Distcp super user to access \n"
+ "the raw bytes from filesystem without decrypting on source and then encrypting on target."),
REPL_INCLUDE_EXTERNAL_TABLES("hive.repl.include.external.tables", false,
"Indicates if repl dump should include information about external tables. It should be \n"
+ "used in conjunction with 'hive.repl.dump.metadata.only' set to false. if 'hive.repl.dump.metadata.only' \n"
+ " is set to true then this config parameter has no effect as external table meta data is flushed \n"
+ " always by default. If this config parameter is enabled on an on-going replication policy which is in\n"
+ " incremental phase, then need to set 'hive.repl.bootstrap.external.tables' to true for the first \n"
+ " repl dump to bootstrap all external tables."),
REPL_BOOTSTRAP_EXTERNAL_TABLES("hive.repl.bootstrap.external.tables", false,
"Indicates if repl dump should bootstrap the information about external tables along with incremental \n"
+ "dump for replication. It is recommended to keep this config parameter as false always and should be \n"
+ "set to true only via WITH clause of REPL DUMP command. It should be used in conjunction with \n"
+ "'hive.repl.include.external.tables' when sets to true. If 'hive.repl.include.external.tables' is \n"
+ "set to false, then this config parameter has no effect. It should be set to true only once for \n"
+ "incremental repl dump on each existing replication policy after enabling external tables replication."),
REPL_ENABLE_MOVE_OPTIMIZATION("hive.repl.enable.move.optimization", false,
"If its set to true, REPL LOAD copies data files directly to the target table/partition location \n"
+ "instead of copying to staging directory first and then move to target location. This optimizes \n"
+ " the REPL LOAD on object data stores such as S3 or WASB where creating a directory and move \n"
+ " files are costly operations. In file system like HDFS where move operation is atomic, this \n"
+ " optimization should not be enabled as it may lead to inconsistent data read for non acid tables."),
REPL_MOVE_OPTIMIZED_FILE_SCHEMES("hive.repl.move.optimized.scheme", "s3a, wasb",
"Comma separated list of schemes for which move optimization will be enabled during repl load. \n"
+ "This configuration overrides the value set using REPL_ENABLE_MOVE_OPTIMIZATION for the given schemes. \n"
+ " Schemes of the file system which does not support atomic move (rename) can be specified here to \n "
+ " speed up the repl load operation. In file system like HDFS where move operation is atomic, this \n"
+ " optimization should not be enabled as it may lead to inconsistent data read for non acid tables."),
REPL_EXTERNAL_TABLE_BASE_DIR("hive.repl.replica.external.table.base.dir", "/",
"This is the base directory on the target/replica warehouse under which data for "
+ "external tables is stored. This is relative base path and hence prefixed to the source "
+ "external table path on target cluster."),
LOCALSCRATCHDIR("hive.exec.local.scratchdir",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Local scratch space for Hive jobs"),
DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
"${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
"Temporary local directory for added resources in the remote file system."),
SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
"The permission for the user specific scratch directories that get created."),
SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
"Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" +
"separate JVM (true recommended) or not. \n" +
"Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
"Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
"This prevents runaway scripts from filling logs partitions to capacity"),
ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
"When enabled, this option allows a user script to exit successfully without consuming \n" +
"all the data from the standard input."),
STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
"Streaming jobs that log to standard error with this prefix can log counter or status information."),
STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
"Enable consumption of status and counter messages for streaming jobs."),
COMPRESSRESULT("hive.exec.compress.output", false,
"This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
"This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
"size per reducer.The default is 256Mb, i.e if the input size is 1G, it will use 4 reducers."),
MAXREDUCERS("hive.exec.reducers.max", 1009,
"max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
"negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."),
PREEXECHOOKS("hive.exec.pre.hooks", "",
"Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
"A pre-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
POSTEXECHOOKS("hive.exec.post.hooks", "",
"Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
"A post-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
ONFAILUREHOOKS("hive.exec.failure.hooks", "",
"Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
"An on-failure hook is specified as the name of Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
"Comma-separated list of hooks to be invoked for each query which can \n" +
"tranform the query before it's placed in the job.xml file. Must be a Java class which \n" +
"extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
"Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
"A client stats publisher is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
ATSHOOKQUEUECAPACITY("hive.ats.hook.queue.capacity", 64,
"Queue size for the ATS Hook executor. If the number of outstanding submissions \n" +
"to the ATS executor exceed this amount, the Hive ATS Hook will not try to log queries to ATS."),
EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
"How many jobs at most can be executed in parallel"),
HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
"Whether speculative execution for reducers should be turned on. "),
HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
"The interval with which to poll the JobTracker for the counters the running job. \n" +
"The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."),
DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
"Whether or not to allow dynamic partitions in DML/DDL."),
DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
"In strict mode, the user must specify at least one static partition\n" +
"in case the user accidentally overwrites all partitions.\n" +
"In nonstrict mode all partitions are allowed to be dynamic."),
DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
"Maximum number of dynamic partitions allowed to be created in total."),
DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
"Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
"Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
"The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
"This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
"The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
// Whether to show a link to the most failed task + debugging tips
SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
"If a job fails, whether to provide a link in the CLI to the task with the\n" +
"most failures, along with debugging hints if applicable."),
JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
"Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
"for each failed job should be stored in the SessionState"),
JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
"String used as a file extension for output files. \n" +
"If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
HIVE_IN_TEST_SSL("hive.in.ssl.test", false, "internal usage only, true in SSL test mode", true),
// TODO: this needs to be removed; see TestReplicationScenarios* comments.
HIVE_IN_TEST_REPL("hive.in.repl.test", false, "internal usage only, true in replication test mode", true),
HIVE_IN_TEST_IDE("hive.in.ide.test", false, "internal usage only, true if test running in ide",
true),
HIVE_TESTING_SHORT_LOGS("hive.testing.short.logs", false,
"internal usage only, used only in test mode. If set true, when requesting the " +
"operation logs the short version (generated by LogDivertAppenderForTest) will be " +
"returned"),
HIVE_TESTING_REMOVE_LOGS("hive.testing.remove.logs", true,
"internal usage only, used only in test mode. If set false, the operation logs, and the " +
"operation log directory will not be removed, so they can be found after the test runs."),
HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
true),
HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD("hive.mapjoin.testing.no.hash.table.load", false, "internal use only, true when in testing map join",
true),
HIVE_ADDITIONAL_PARTIAL_MASKS_PATTERN("hive.qtest.additional.partial.mask.pattern", "",
"internal use only, used in only qtests. Provide additional partial masks pattern" +
"for qtests as a ',' separated list"),
HIVE_ADDITIONAL_PARTIAL_MASKS_REPLACEMENT_TEXT("hive.qtest.additional.partial.mask.replacement.text", "",
"internal use only, used in only qtests. Provide additional partial masks replacement" +
"text for qtests as a ',' separated list"),
HIVE_IN_REPL_TEST_FILES_SORTED("hive.in.repl.test.files.sorted", false,
"internal usage only, set to true if the file listing is required in sorted order during bootstrap load", true),
LOCALMODEAUTO("hive.exec.mode.local.auto", false,
"Let Hive determine whether to run in local mode automatically"),
LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
"When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."),
LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
"When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),
DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
"Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/function"),
HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
"maximum number of lines for footer user can define for a table file"),
HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
"Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
"Table alias will be added to column names for queries of type \"select *\" or \n" +
"if query explicitly uses table alias \"select r1.x..\"."),
HIVE_PROTO_EVENTS_BASE_PATH("hive.hook.proto.base-directory", "",
"Base directory into which the proto event messages are written by HiveProtoLoggingHook."),
HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL("hive.hook.proto.rollover-interval", "600s",
new TimeValidator(TimeUnit.SECONDS, 0L, true, 3600 * 24L, true),
"Frequency at which the file rollover check is triggered."),
HIVE_PROTO_EVENTS_CLEAN_FREQ("hive.hook.proto.events.clean.freq", "1d",
new TimeValidator(TimeUnit.DAYS),
"Frequency at which timer task runs to purge expired proto event files."),
HIVE_PROTO_EVENTS_TTL("hive.hook.proto.events.ttl", "7d",
new TimeValidator(TimeUnit.DAYS),
"Time-To-Live (TTL) of proto event files before cleanup."),
HIVE_PROTO_FILE_PER_EVENT("hive.hook.proto.file.per.event", false,
"Whether each proto event has to be written to separate file. " +
"(Use this for FS that does not hflush immediately like S3A)"),
// Hadoop Configuration Properties
// Properties with null values are ignored and exist only for the purpose of giving us
// a symbolic name to reference in the Hive source code. Properties with non-null
// values will override any values set in the underlying Hadoop configuration.
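// For example (illustrative): HADOOPNUMREDUCERS below carries the non-null default -1 and will
// therefore override mapreduce.job.reduces from the underlying Hadoop configuration, whereas an
// entry declared with a null default would only serve as a symbolic name and leave the Hadoop
// value untouched.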
HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
YARNBIN("yarn.bin.path", findYarnBinary(), "", true),
MAPREDBIN("mapred.bin.path", findMapRedBinary(), "", true),
HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
"The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true),
MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true),
MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true),
MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true),
// The number of reduce tasks per job. Hadoop sets this value to 1 by default
// By setting this property to -1, Hive will automatically determine the correct
// number of reducers.
HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true),
// Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"),
"Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."),
/**
* @deprecated Use MetastoreConf.WAREHOUSE
*/
@Deprecated
METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
"location of default database for the warehouse"),
HIVE_METASTORE_WAREHOUSE_EXTERNAL("hive.metastore.warehouse.external.dir", null,
"Default location for external tables created in the warehouse. " +
"If not set or null, then the normal warehouse location will be used as the default location."),
/**
* @deprecated Use MetastoreConf.THRIFT_URIS
*/
@Deprecated
METASTOREURIS("hive.metastore.uris", "",
"Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
/**
* @deprecated Use MetastoreConf.THRIFT_URI_SELECTION
*/
@Deprecated
METASTORESELECTION("hive.metastore.uri.selection", "RANDOM",
new StringSet("SEQUENTIAL", "RANDOM"),
"Determines the selection mechanism used by metastore client to connect to remote " +
"metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified " +
"as part of hive.metastore.uris will be picked. RANDOM implies that the metastore " +
"will be picked randomly"),
/**
* @deprecated Use MetastoreConf.CAPABILITY_CHECK
*/
@Deprecated
METASTORE_CAPABILITY_CHECK("hive.metastore.client.capability.check", true,
"Whether to check client capabilities for potentially breaking API usage."),
METASTORE_CLIENT_CACHE_ENABLED("hive.metastore.client.cache.enabled", false,
"Whether to enable metastore client cache"),
METASTORE_CLIENT_CACHE_EXPIRY_TIME("hive.metastore.client.cache.expiry.time", "120s",
new TimeValidator(TimeUnit.SECONDS), "Expiry time for metastore client cache"),
METASTORE_CLIENT_CACHE_INITIAL_CAPACITY("hive.metastore.client.cache.initial.capacity", 50,
"Initial capacity for metastore client cache"),
METASTORE_CLIENT_CACHE_MAX_CAPACITY("hive.metastore.client.cache.max.capacity", 50,
"Max capacity for metastore client cache"),
METASTORE_CLIENT_CACHE_STATS_ENABLED("hive.metastore.client.cache.stats.enabled", false,
"Whether to enable metastore client cache stats"),
METASTORE_FASTPATH("hive.metastore.fastpath", false,
"Used to avoid all of the proxies and object copies in the metastore. Note, if this is " +
"set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
"undefined and most likely undesired behavior will result"),
/**
* @deprecated Use MetastoreConf.FS_HANDLER_THREADS_COUNT
*/
@Deprecated
METASTORE_FS_HANDLER_THREADS_COUNT("hive.metastore.fshandler.threads", 15,
"Number of threads to be allocated for metastore handler for fs operations."),
/**
* @deprecated Use MetastoreConf.FILE_METADATA_THREADS
*/
@Deprecated
METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1,
"Number of threads to use to read file metadata in background to cache it."),
/**
* @deprecated Use MetastoreConf.URI_RESOLVER
*/
@Deprecated
METASTORE_URI_RESOLVER("hive.metastore.uri.resolver", "",
"If set, fully qualified class name of resolver for hive metastore uri's"),
/**
* @deprecated Use MetastoreConf.THRIFT_CONNECTION_RETRIES
*/
@Deprecated
METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
"Number of retries while opening a connection to metastore"),
/**
* @deprecated Use MetastoreConf.THRIFT_FAILURE_RETRIES
*/
@Deprecated
METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
"Number of retries upon failure of Thrift metastore calls"),
/**
* @deprecated Use MetastoreConf.SERVER_PORT
*/
@Deprecated
METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
/**
* @deprecated Use MetastoreConf.CLIENT_CONNECT_RETRY_DELAY
*/
@Deprecated
METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for the client to wait between consecutive connection attempts"),
/**
* @deprecated Use MetastoreConf.CLIENT_SOCKET_TIMEOUT
*/
@Deprecated
METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket timeout in seconds"),
/**
* @deprecated Use MetastoreConf.CLIENT_SOCKET_LIFETIME
*/
@Deprecated
METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
"reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
"has an infinite lifetime."),
/**
* @deprecated Use MetastoreConf.PWD
*/
@Deprecated
METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
"password to use against metastore database"),
/**
* @deprecated Use MetastoreConf.CONNECT_URL_HOOK
*/
@Deprecated
METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
"Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
/**
* @deprecated Use MetastoreConf.MULTITHREADED
*/
@Deprecated
METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
"Set this to true if multiple threads access metastore through JDO concurrently."),
/**
* @deprecated Use MetastoreConf.CONNECT_URL_KEY
*/
@Deprecated
METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
"jdbc:derby:;databaseName=metastore_db;create=true",
"JDBC connect string for a JDBC metastore.\n" +
"To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
"For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
/**
* @deprecated Use MetastoreConf.DBACCESS_SSL_PROPS
*/
@Deprecated
METASTORE_DBACCESS_SSL_PROPS("hive.metastore.dbaccess.ssl.properties", "",
"Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
"enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_ATTEMPTS
*/
@Deprecated
HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
"The number of times to retry a HMSHandler call if there were a connection error."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_INTERVAL
*/
@Deprecated
HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_FORCE_RELOAD_CONF
*/
@Deprecated
HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
"Whether to force reloading of the HMSHandler configuration (including\n" +
"the connection URL, before the next metastore query that accesses the\n" +
"datastore. Once reloaded, this value is reset to false. Used for\n" +
"testing only."),
/**
* @deprecated Use MetastoreConf.SERVER_MAX_MESSAGE_SIZE
*/
@Deprecated
METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L,
"Maximum message size in bytes a HMS will accept."),
/**
* @deprecated Use MetastoreConf.SERVER_MIN_THREADS
*/
@Deprecated
METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
"Minimum number of worker threads in the Thrift server's pool."),
/**
* @deprecated Use MetastoreConf.SERVER_MAX_THREADS
*/
@Deprecated
METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
"Maximum number of worker threads in the Thrift server's pool."),
/**
* @deprecated Use MetastoreConf.TCP_KEEP_ALIVE
*/
@Deprecated
METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
/**
* @deprecated Use MetastoreConf.WM_DEFAULT_POOL_SIZE
*/
@Deprecated
METASTORE_WM_DEFAULT_POOL_SIZE("hive.metastore.wm.default.pool.size", 4,
"The size of a default pool to create when creating an empty resource plan;\n" +
"If not positive, no default pool will be created."),
METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
"_INTERMEDIATE_ORIGINAL",
"Intermediate dir suffixes used for archiving. Not important what they\n" +
"are, as long as collisions are avoided"),
METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
"_INTERMEDIATE_ARCHIVED", ""),
METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
"_INTERMEDIATE_EXTRACTED", ""),
/**
* @deprecated Use MetastoreConf.KERBEROS_KEYTAB_FILE
*/
@Deprecated
METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
"The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
/**
* @deprecated Use MetastoreConf.KERBEROS_PRINCIPAL
*/
@Deprecated
METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
"hive-metastore/[email protected]",
"The service principal for the metastore Thrift server. \n" +
"The special string _HOST will be replaced automatically with the correct host name."),
/**
* @deprecated Use MetastoreConf.CLIENT_KERBEROS_PRINCIPAL
*/
@Deprecated
METASTORE_CLIENT_KERBEROS_PRINCIPAL("hive.metastore.client.kerberos.principal",
"", // E.g. "hive-metastore/[email protected]".
"The Kerberos principal associated with the HA cluster of hcat_servers."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_SASL
*/
@Deprecated
METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
"If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_FRAMED_TRANSPORT
*/
@Deprecated
METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
"If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_COMPACT_PROTOCOL
*/
@Deprecated
METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
"If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
"Setting it to true will break compatibility with older clients running TBinaryProtocol."),
/**
* @deprecated Use MetastoreConf.TOKEN_SIGNATURE
*/
@Deprecated
METASTORE_TOKEN_SIGNATURE("hive.metastore.token.signature", "",
"The delegation token service name to match when selecting a token from the current user's tokens."),
/**
* @deprecated Use MetastoreConf.DELEGATION_TOKEN_STORE_CLS
*/
@Deprecated
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
"org.apache.hadoop.hive.thrift.MemoryTokenStore",
"The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
"hive.cluster.delegation.token.store.zookeeper.connectString", "",
"The ZooKeeper token store connect string. You can re-use the configuration value\n" +
"set in hive.zookeeper.quorum, by leaving this parameter unset."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
"hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
"The root path for token store data. Note that this is used by both HiveServer2 and\n" +
"MetaStore to store delegation Token. One directory gets created for each of them.\n" +
"The final directory names would have the servername appended to it (HIVESERVER2,\n" +
"METASTORE)."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
"hive.cluster.delegation.token.store.zookeeper.acl", "",
"ACL for token store entries. Comma separated list of ACL entries. For example:\n" +
"sasl:hive/[email protected]:cdrwa,sasl:hive/[email protected]:cdrwa\n" +
"Defaults to all permissions for the hiveserver2/metastore process user."),
/**
* @deprecated Use MetastoreConf.CACHE_PINOBJTYPES
*/
@Deprecated
METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
"List of comma separated metastore object types that should be pinned in the cache"),
/**
* @deprecated Use MetastoreConf.CONNECTION_POOLING_TYPE
*/
@Deprecated
METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "HikariCP", new StringSet("BONECP", "DBCP",
"HikariCP", "NONE"),
"Specify connection pool library for datanucleus"),
/**
* @deprecated Use MetastoreConf.CONNECTION_POOLING_MAX_CONNECTIONS
*/
@Deprecated
METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize", 10,
"Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
"2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
"recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
"configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
"(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
"(2 * physical_core_count + hard_disk_count)."),
// Workaround for DN bug on Postgres:
// http://www.datanucleus.org/servlet/forum/viewthread_thread,7985_offset
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_INIT_COL_INFO
*/
@Deprecated
METASTORE_DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo", "NONE",
"initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."),
/**
* @deprecated Use MetastoreConf.VALIDATE_TABLES
*/
@Deprecated
METASTORE_VALIDATE_TABLES("datanucleus.schema.validateTables", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.VALIDATE_COLUMNS
*/
@Deprecated
METASTORE_VALIDATE_COLUMNS("datanucleus.schema.validateColumns", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.VALIDATE_CONSTRAINTS
*/
@Deprecated
METASTORE_VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.STORE_MANAGER_TYPE
*/
@Deprecated
METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
/**
* @deprecated Use MetastoreConf.AUTO_CREATE_ALL
*/
@Deprecated
METASTORE_AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", false,
"Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once."
+ "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not "
+ "recommended for production use cases, run schematool command instead." ),
/**
* @deprecated Use MetastoreConf.SCHEMA_VERIFICATION
*/
@Deprecated
METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", true,
"Enforce metastore schema version consistency.\n" +
"True: Verify that version information stored in is compatible with one from Hive jars. Also disable automatic\n" +
" schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
" proper metastore schema migration. (Default)\n" +
"False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
/**
* @deprecated Use MetastoreConf.SCHEMA_VERIFICATION_RECORD_VERSION
*/
@Deprecated
METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", false,
"When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
" enabled the MS will be unusable."),
/**
* @deprecated Use MetastoreConf.SCHEMA_INFO_CLASS
*/
@Deprecated
METASTORE_SCHEMA_INFO_CLASS("hive.metastore.schema.info.class",
"org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
"Fully qualified class name for the metastore schema information class \n"
+ "which is used by schematool to fetch the schema information.\n"
+ " This class should implement the IMetaStoreSchemaInfo interface"),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_TRANSACTION_ISOLATION
*/
@Deprecated
METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
"Default transaction isolation level for identity generation."),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_CACHE_LEVEL2
*/
@Deprecated
METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
"Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
/**
* @deprecated Use MetastoreConf.IDENTIFIER_FACTORY
*/
@Deprecated
METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
"Name of the identifier factory to use when generating table/column names etc. \n" +
"'datanucleus1' is used for backward compatibility with DataNucleus v1"),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY
*/
@Deprecated
METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK
*/
@Deprecated
METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
"Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
/**
* @deprecated Use MetastoreConf.BATCH_RETRIEVE_MAX
*/
@Deprecated
METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
"Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
"The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
"but it may also cause higher memory requirement at the client side."),
/**
* @deprecated Use MetastoreConf.BATCH_RETRIEVE_OBJECTS_MAX
*/
@Deprecated
METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
"hive.metastore.batch.retrieve.table.partition.max", 1000,
"Maximum number of objects that metastore internally retrieves in one batch."),
/**
* @deprecated Use MetastoreConf.INIT_HOOKS
*/
@Deprecated
METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
"A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
"An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
/**
* @deprecated Use MetastoreConf.PRE_EVENT_LISTENERS
*/
@Deprecated
METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
"List of comma separated listeners for metastore events."),
/**
* @deprecated Use MetastoreConf.EVENT_LISTENERS
*/
@Deprecated
METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
"Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
/**
* @deprecated Use MetastoreConf.TRANSACTIONAL_EVENT_LISTENERS
*/
@Deprecated
METASTORE_TRANSACTIONAL_EVENT_LISTENERS("hive.metastore.transactional.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
/**
* @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES
*/
@Deprecated
NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("hive.notification.sequence.lock.max.retries", 10,
"Number of retries required to acquire a lock when getting the next notification sequential ID for entries "
+ "in the NOTIFICATION_LOG table."),
/**
* @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL
*/
@Deprecated
NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL("hive.notification.sequence.lock.retry.sleep.interval", 10L,
new TimeValidator(TimeUnit.SECONDS),
"Sleep interval between retries to acquire a notification lock as described part of property "
+ NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()),
/**
* @deprecated Use MetastoreConf.EVENT_DB_LISTENER_TTL
*/
@Deprecated
METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"time after which events will be removed from the database listener queue"),
/**
* @deprecated Use MetastoreConf.EVENT_DB_NOTIFICATION_API_AUTH
*/
@Deprecated
METASTORE_EVENT_DB_NOTIFICATION_API_AUTH("hive.metastore.event.db.notification.api.auth", true,
"Should metastore do authorization against database notification related APIs such as get_next_notification.\n" +
"If set to true, then only the superusers in proxy settings have the permission"),
/**
* @deprecated Use MetastoreConf.AUTHORIZATION_STORAGE_AUTH_CHECKS
*/
@Deprecated
METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
"Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
"for operations like drop-partition (disallow the drop-partition if the user in\n" +
"question doesn't have permissions to delete the corresponding directory\n" +
"on the storage)."),
METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK("hive.metastore.authorization.storage.check.externaltable.drop", true,
"Should StorageBasedAuthorization check permission of the storage before dropping external table.\n" +
"StorageBasedAuthorization already does this check for managed table. For external table however,\n" +
"anyone who has read permission of the directory could drop external table, which is surprising.\n" +
"The flag is set to false by default to maintain backward compatibility."),
/**
* @deprecated Use MetastoreConf.EVENT_CLEAN_FREQ
*/
@Deprecated
METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired events in metastore."),
/**
* @deprecated Use MetastoreConf.EVENT_EXPIRY_DURATION
*/
@Deprecated
METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Duration after which events expire from events table"),
/**
* @deprecated Use MetastoreConf.EVENT_MESSAGE_FACTORY
*/
@Deprecated
METASTORE_EVENT_MESSAGE_FACTORY("hive.metastore.event.message.factory",
"org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder",
"Factory class for making encoding and decoding messages in the events generated."),
/**
* @deprecated Use MetastoreConf.EXECUTE_SET_UGI
*/
@Deprecated
METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
"In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
"the client's reported user and group permissions. Note that this property must be set on \n" +
"both the client and server sides. Further note that its best effort. \n" +
"If client sets its to true and server sets it to false, client setting will be ignored."),
/**
* @deprecated Use MetastoreConf.PARTITION_NAME_WHITELIST_PATTERN
*/
@Deprecated
METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
"Partition names will be checked against this regex pattern and rejected if not matched."),
/**
* @deprecated Use MetastoreConf.INTEGER_JDO_PUSHDOWN
*/
@Deprecated
METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
"Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
"improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
"However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
"leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
"is also irrelevant."),
/**
* @deprecated Use MetastoreConf.TRY_DIRECT_SQL
*/
@Deprecated
METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
"Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
"DataNucleus for certain read paths. This can improve metastore performance when\n" +
"fetching many partitions or column statistics by orders of magnitude; however, it\n" +
"is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
"the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
"work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
"metastore is backed by MongoDB), you might want to disable this to save the\n" +
"try-and-fall-back cost."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_PARTITION_BATCH_SIZE
*/
@Deprecated
METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
"Batch size for partition and other object retrieval from the underlying DB in direct\n" +
"SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
"that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
"may impede performance. -1 means no batching, 0 means automatic batching."),
/**
* @deprecated Use MetastoreConf.TRY_DIRECT_SQL_DDL
*/
@Deprecated
METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
"Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
"modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
"select query has incorrect syntax or something similar inside a transaction, the\n" +
"entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
"should disable the usage of direct SQL inside transactions if that happens in your case."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_QUERY_LENGTH
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" +
" size of a query string (in KB)."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000,
"The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" +
" multiple OR separated IN clauses."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
1000, "The maximum number of values in a VALUES clause for INSERT statement."),
/**
* @deprecated Use MetastoreConf.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS
*/
@Deprecated
METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
"Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
"either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
"as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
"pruning is the correct behaviour"),
/**
* @deprecated Use MetastoreConf.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
*/
@Deprecated
METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
"hive.metastore.disallow.incompatible.col.type.changes", true,
"If true (default is false), ALTER TABLE operations which change the type of a\n" +
"column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
"RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
"datatypes can be converted from string to any type. The map is also serialized as\n" +
"a string, which can be read as a string as well. However, with any binary\n" +
"serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
"when subsequently trying to access old partitions.\n" +
"\n" +
"Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
"not blocked.\n" +
"\n" +
"See HIVE-4409 for more details."),
/**
* @deprecated Use MetastoreConf.LIMIT_PARTITION_REQUEST
*/
@Deprecated
METASTORE_LIMIT_PARTITION_REQUEST("hive.metastore.limit.partition.request", -1,
"This limits the number of partitions that can be requested from the metastore for a given table.\n" +
"The default value \"-1\" means no limit."),
NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
"Default property values for newly created tables"),
DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
"Table Properties to copy over when executing a Create Table Like."),
/**
* @deprecated Use MetastoreConf.RAW_STORE_IMPL
*/
@Deprecated
METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
"Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
"This class is used to store and retrieval of raw metadata objects such as table, database"),
/**
* @deprecated Use MetastoreConf.TXN_STORE_IMPL
*/
@Deprecated
METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl",
"org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
"Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This " +
"class is used to store and retrieve transactions and locks"),
/**
* @deprecated Use MetastoreConf.CONNECTION_DRIVER
*/
@Deprecated
METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
"Driver class name for a JDBC metastore"),
/**
* @deprecated Use MetastoreConf.MANAGER_FACTORY_CLASS
*/
@Deprecated
METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
"org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
"class implementing the jdo persistence"),
/**
* @deprecated Use MetastoreConf.EXPRESSION_PROXY_CLASS
*/
@Deprecated
METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
"org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
/**
* @deprecated Use MetastoreConf.DETACH_ALL_ON_COMMIT
*/
@Deprecated
METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
"Detaches all objects from session so that they can be used after transaction is committed"),
/**
* @deprecated Use MetastoreConf.NON_TRANSACTIONAL_READ
*/
@Deprecated
METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
"Reads outside of transactions"),
/**
* @deprecated Use MetastoreConf.CONNECTION_USER_NAME
*/
@Deprecated
METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
"Username to use against metastore database"),
/**
* @deprecated Use MetastoreConf.END_FUNCTION_LISTENERS
*/
@Deprecated
METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
"List of comma separated listeners for the end of metastore functions."),
/**
* @deprecated Use MetastoreConf.PART_INHERIT_TBL_PROPS
*/
@Deprecated
METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
"List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
"* implies all the keys will get inherited."),
/**
* @deprecated Use MetastoreConf.FILTER_HOOK
*/
@Deprecated
METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
"Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
+ "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
" to fire events for DML operations"),
METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
"Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
"or drops partitions iteratively"),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_ENABLED
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", false,
"Whether aggregate stats caching is enabled or not."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_SIZE
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
"Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_PARTITIONS
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
"Maximum number of partitions that are aggregated per cache node."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_FPP
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
"Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_VARIANCE
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
"Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_TTL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for a cached node to be active in the cache before they become stale."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a writer will wait to acquire the writelock before giving up."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_READER_WAIT
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a reader will wait to acquire the readlock before giving up."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_FULL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
"Maximum cache full % after which the cache cleaner thread kicks in."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_CLEAN_UNTIL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
"The cleaner thread cleans until cache reaches this % full size."),
/**
* @deprecated Use MetastoreConf.METRICS_ENABLED
*/
@Deprecated
METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
/**
* @deprecated Use MetastoreConf.INIT_METADATA_COUNT_ENABLED
*/
@Deprecated
METASTORE_INIT_METADATA_COUNT_ENABLED("hive.metastore.initial.metadata.count.enabled", true,
"Enable a metadata count at metastore startup for metrics."),
// Metastore SSL settings
/**
* @deprecated Use MetastoreConf.USE_SSL
*/
@Deprecated
HIVE_METASTORE_USE_SSL("hive.metastore.use.SSL", false,
"Set this to true for using SSL encryption in HMS server."),
/**
* @deprecated Use MetastoreConf.SSL_KEYSTORE_PATH
*/
@Deprecated
HIVE_METASTORE_SSL_KEYSTORE_PATH("hive.metastore.keystore.path", "",
"Metastore SSL certificate keystore location."),
/**
* @deprecated Use MetastoreConf.SSL_KEYSTORE_PASSWORD
*/
@Deprecated
HIVE_METASTORE_SSL_KEYSTORE_PASSWORD("hive.metastore.keystore.password", "",
"Metastore SSL certificate keystore password."),
/**
* @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PATH
*/
@Deprecated
HIVE_METASTORE_SSL_TRUSTSTORE_PATH("hive.metastore.truststore.path", "",
"Metastore SSL certificate truststore location."),
/**
* @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PASSWORD
*/
@Deprecated
HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD("hive.metastore.truststore.password", "",
"Metastore SSL certificate truststore password."),
// Parameters for exporting metadata on table drop (requires the use of the)
// org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
/**
* @deprecated Use MetastoreConf.METADATA_EXPORT_LOCATION
*/
@Deprecated
METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
"metadata being exported to the current user's home directory on HDFS."),
/**
* @deprecated Use MetastoreConf.MOVE_EXPORTED_METADATA_TO_TRASH
*/
@Deprecated
MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
"alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
"Whether to include the current database in the Hive prompt."),
CLIPROMPT("hive.cli.prompt", "hive",
"Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
"Variable substitution will only be invoked at the Hive CLI startup."),
/**
* @deprecated Use MetastoreConf.FS_HANDLER_CLS
*/
@Deprecated
HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
// Things we log in the jobconf
// session identifier
HIVESESSIONID("hive.session.id", "", ""),
// whether session is running in silent mode or not
HIVESESSIONSILENT("hive.session.silent", false, ""),
HIVE_LOCAL_TIME_ZONE("hive.local.time.zone", "LOCAL",
"Sets the time-zone for displaying and interpreting time stamps. If this property value is set to\n" +
"LOCAL, it is not specified, or it is not a correct time-zone, the system default time-zone will be\n " +
"used instead. Time-zone IDs can be specified as region-based zone IDs (based on IANA time-zone data),\n" +
"abbreviated zone IDs, or offset IDs."),
HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
"Whether to log Hive query, query plan, runtime statistics etc."),
HIVEQUERYSTRING("hive.query.string", "",
"Query being executed (might be multiple per a session)"),
HIVEQUERYID("hive.query.id", "",
"ID for query being executed (might be multiple per a session)"),
HIVEQUERYTAG("hive.query.tag", null, "Tag for the queries in the session. Users can kill the queries with the tag " +
"from another session. Currently there is no tag duplication check, so users need to make sure their tag is unique. " +
"Also, 'kill query' needs to be issued to all HiveServer2 instances to properly kill the queries."),
HIVESPARKJOBNAMELENGTH("hive.spark.jobname.length", 100000, "max jobname length for Hive on " +
"Spark queries"),
HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
// hive jar
HIVEJAR("hive.jar.path", "",
"The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
HIVEAUXJARS("hive.aux.jars.path", "",
"The location of the plugin jars that contain implementations of user defined functions and serdes."),
// reloadable jars
HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
"The locations of the plugin jars, which can be a comma-separated folders or jars. Jars can be renewed\n"
+ "by executing reload command. And these jars can be "
+ "used as the auxiliary classes like creating a UDF or SerDe."),
// hive added files and jars
HIVEADDEDFILES("hive.added.files.path", "", "This is an internal parameter."),
HIVEADDEDJARS("hive.added.jars.path", "", "This is an internal parameter."),
HIVEADDEDARCHIVES("hive.added.archives.path", "", "This is an internal parameter."),
HIVEADDFILESUSEHDFSLOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS-based files/jars directly instead of "
+ "copying them to the session-based HDFS scratch directory, to make the distributed cache more useful."),
HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
// for hive script operator
HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"How long to run autoprogressor for the script/UDTF operators.\n" +
"Set to 0 for forever."),
HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
"Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
"to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" +
"outputting to stderr. This option removes the need of periodically producing stderr messages, \n" +
"but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
"Name of the environment variable that holds the unique script operator ID in the user's \n" +
"transform function (the custom mapper/reducer that the user has specified in the query)"),
HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
"Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
"hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist,hive.repl.current.table.write.id",
"Comma separated list of keys from the configuration file not to convert to environment " +
"variables when invoking the script operator"),
HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT("hive.strict.checks.orderby.no.limit", false,
"Enabling strict large query checks disallows the following:\n" +
" Orderby without limit.\n" +
"Note that this check currently does not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_NO_PARTITION_FILTER("hive.strict.checks.no.partition.filter", false,
"Enabling strict large query checks disallows the following:\n" +
" No partition being picked up for a query against partitioned table.\n" +
"Note that this check currently does not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
"Enabling strict type safety checks disallows the following:\n" +
" Comparing bigints and strings.\n" +
" Comparing bigints and doubles."),
HIVE_STRICT_CHECKS_CARTESIAN("hive.strict.checks.cartesian.product", false,
"Enabling strict Cartesian join checks disallows the following:\n" +
" Cartesian product (cross join)."),
HIVE_STRICT_CHECKS_BUCKETING("hive.strict.checks.bucketing", true,
"Enabling strict bucketing checks disallows the following:\n" +
" Load into bucketed tables."),
HIVE_LOAD_DATA_OWNER("hive.load.data.owner", "",
"Set the owner of files loaded using load data in managed tables."),
@Deprecated
HIVEMAPREDMODE("hive.mapred.mode", null,
"Deprecated; use hive.strict.checks.* settings instead."),
HIVEALIAS("hive.alias", "", ""),
HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
"How many rows in the right-most join operand Hive should buffer before emitting the join result."),
HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
"How many rows in the joining tables (except the streaming table) should be cached in memory."),
HIVE_PUSH_RESIDUAL_INNER("hive.join.inner.residual", false,
"Whether to push non-equi filter predicates within inner joins. This can improve efficiency in "
+ "the evaluation of certain joins, since we will not be emitting rows which are thrown away by "
+ "a Filter operator straight away. However, currently vectorization does not support them, thus "
+ "enabling it is only recommended when vectorization is disabled."),
HIVE_PTF_RANGECACHE_SIZE("hive.ptf.rangecache.size", 10000,
"Size of the cache used on reducer side, that stores boundaries of ranges within a PTF " +
"partition. Used if a query specifies a RANGE type window including an orderby clause." +
"Set this to 0 to disable this cache."),
// CBO related
HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
HIVE_CBO_CNF_NODES_LIMIT("hive.cbo.cnf.maxnodes", -1, "When converting to conjunctive normal form (CNF), fail if " +
"the expression exceeds this threshold; the threshold is expressed in terms of the number of nodes (leaves and " +
"interior nodes). -1 to not set up a threshold."),
HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on "
+ "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over the network;"
+ " expressed as multiple of CPU cost"),
HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
+ " expressed as multiple of NETWORK cost"),
HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
+ " expressed as multiple of NETWORK cost"),
HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
+ " expressed as multiple of Local FS write cost"),
HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
+ " expressed as multiple of Local FS read cost"),
HIVE_CBO_SHOW_WARNINGS("hive.cbo.show.warnings", true,
"Toggle display of CBO warnings like missing column stats"),
HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS("hive.cbo.stats.correlated.multi.key.joins", true,
"When CBO estimates output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"),
SEMIJOIN_CONVERSION("hive.optimize.semijoin.conversion", true, "convert group by followed by inner equi join into semijoin"),
HIVE_COLUMN_ALIGNMENT("hive.order.columnalignment", true, "Flag to control whether we want to try to align " +
"columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages."),
// materialized views
HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING("hive.materializedview.rewriting", true,
"Whether to try to rewrite queries using the materialized views enabled for rewriting"),
HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY("hive.materializedview.rewriting.strategy", "heuristic",
new StringSet("heuristic", "costbased"),
"The strategy that should be used to cost and select the materialized view rewriting. \n" +
" heuristic: Always try to select the plan using the materialized view if rewriting produced one," +
"choosing the plan with lower cost among possible plans containing a materialized view\n" +
" costbased: Fully cost-based strategy, always use plan with lower cost, independently on whether " +
"it uses a materialized view or not"),
HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW("hive.materializedview.rewriting.time.window", "0min", new TimeValidator(TimeUnit.MINUTES),
"Time window, specified in seconds, after which outdated materialized views become invalid for automatic query rewriting.\n" +
"For instance, if more time than the value assigned to the property has passed since the materialized view " +
"was created or rebuilt, and one of its source tables has changed since, the materialized view will not be " +
"considered for rewriting. Default value 0 means that the materialized view cannot be " +
"outdated to be used automatically in query rewriting. Value -1 means to skip this check."),
HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL("hive.materializedview.rewriting.incremental", false,
"Whether to try to execute incremental rewritings based on outdated materializations and\n" +
"current content of tables. Default value of true effectively amounts to enabling incremental\n" +
"rebuild for the materializations too."),
HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL("hive.materializedview.rebuild.incremental", true,
"Whether to try to execute incremental rebuild for the materialized views. Incremental rebuild\n" +
"tries to modify the original materialization contents to reflect the latest changes to the\n" +
"materialized view source tables, instead of rebuilding the contents fully. Incremental rebuild\n" +
"is based on the materialized view algebraic incremental rewriting."),
HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR("hive.materializedview.rebuild.incremental.factor", 0.1f,
"The estimated cost of the resulting plan for incremental maintenance of materialization\n" +
"with aggregations will be multiplied by this value. Reducing the value can be useful to\n" +
"favour incremental rebuild over full rebuild."),
HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
"Default file format for CREATE MATERIALIZED VIEW statement"),
HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"),
HIVE_ENABLE_JDBC_PUSHDOWN("hive.jdbc.pushdown.enable", true,
"Flag to control enabling pushdown of operators into JDBC connection and subsequent SQL generation\n" +
"using Calcite"),
HIVE_ENABLE_JDBC_SAFE_PUSHDOWN("hive.jdbc.pushdown.safe.enable", false,
"Flag to control enabling pushdown of operators using Calcite that prevent splitting results\n" +
"retrieval in the JDBC storage handler"),
// hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
// need to remove by Hive 0.13. Also, do not change default (see SMB operator)
HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
"Whether Hive should use memory-optimized hash table for MapJoin.\n" +
"Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized."),
HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
(float) 0.5, "Probing space percentage of the optimized hashtable"),
HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid " +
"grace hash join as the join method for mapjoin. Tez only."),
HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
"hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
"This number should be power of 2."),
HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace " +
"hash join, the minimum write buffer size used by the optimized hashtable. Default is 512 KB."),
HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For " +
"hybrid grace hash join, the minimum number of partitions to create."),
HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024,
"Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
"store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
"joins unnecessary memory will be allocated and then trimmed."),
HIVEHYBRIDGRACEHASHJOINBLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " +
"use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling."),
HIVEMAPJOINFULLOUTER("hive.mapjoin.full.outer", true,
"Whether to use MapJoin for FULL OUTER JOINs."),
HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE(
"hive.test.mapjoin.full.outer.override",
"none", new StringSet("none", "enable", "disable"),
"internal use only, used to override the hive.mapjoin.full.outer\n" +
"setting. Using enable will force it on and disable will force it off.\n" +
"The default none is do nothing, of course",
true),
HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
"How many rows with the same key value should be cached in memory per smb joined table."),
HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
"Number of rows after which size of the grouping keys/aggregation classes is performed"),
HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5,
"Portion of total memory to be used by map-side group aggregation hash table"),
HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
"Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
"The max memory to be used by map-side group aggregation hash table.\n" +
"If the memory usage is higher than this number, force to flush data"),
HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.99,
"Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" +
"Set to 1 to make sure hash aggregation is never turned off."),
HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST("hive.map.aggr.hash.min.reduction.stats", true,
"Whether the value for hive.map.aggr.hash.min.reduction should be set statically using stats estimates. \n" +
"If this is enabled, the default value for hive.map.aggr.hash.min.reduction is only used as an upper-bound\n" +
"for the value set in the map-side group by operators."),
HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
"Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" +
"common group by keys, it will be optimized to generate single M/R job."),
HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true,
"If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
"the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
"is that it limits the number of mappers to the number of files."),
HIVE_DEFAULT_NULLS_LAST("hive.default.nulls.last", true,
"Whether to set NULLS LAST as the default null ordering"),
HIVE_GROUPBY_POSITION_ALIAS("hive.groupby.position.alias", false,
"Whether to enable using Column Position Alias in Group By"),
HIVE_ORDERBY_POSITION_ALIAS("hive.orderby.position.alias", true,
"Whether to enable using Column Position Alias in Order By"),
@Deprecated
HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
"Whether to enable using Column Position Alias in Group By or Order By (deprecated).\n" +
"Use " + HIVE_ORDERBY_POSITION_ALIAS.varname + " or " + HIVE_GROUPBY_POSITION_ALIAS.varname + " instead"),
HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
"Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
"For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
"4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
"This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
"and map-side aggregation does not do a very good job. \n" +
"\n" +
"This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
"cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
"assumption that the original group by will reduce the data size."),
HIVE_GROUPBY_LIMIT_EXTRASTEP("hive.groupby.limit.extrastep", true, "This parameter decides if Hive should \n" +
"create new MR job for sorting final output"),
// Max file num and size used to do a single copy (after that, distcp is used)
HIVE_EXEC_COPYFILE_MAXNUMFILES("hive.exec.copyfile.maxnumfiles", 1L,
"Maximum number of files Hive uses to do sequential HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."),
HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
"Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
// for hive udtf operator
HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
"Whether Hive should automatically send progress information to TaskTracker \n" +
"when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious \n" +
"because this may prevent TaskTracker from killing tasks with infinite loops."),
HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
"created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
"for all tables."),
HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"),
"Default file format for storing result of the query."),
HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
// default serde for rcfile
HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
"The default SerDe Hive will use for the RCFile format"),
HIVEDEFAULTSERDE("hive.default.serde",
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe Hive will use for storage formats that do not specify a SerDe."),
/**
* @deprecated Use MetastoreConf.SERDES_USING_METASTORE_FOR_SCHEMA
*/
@Deprecated
SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
"org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
"org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
"SerDes retrieving schema from metastore. This is an internal parameter."),
@Deprecated
HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES("hive.legacy.schema.for.all.serdes",
false,
"A backward compatibility setting for external metastore users that do not handle \n" +
SERDESUSINGMETASTOREFORSCHEMA.varname + " correctly. This may be removed at any time."),
HIVEHISTORYFILELOC("hive.querylog.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Location of Hive run time structured log file"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
"Whether to log the plan's progress every time a job's progress is checked.\n" +
"These logs are written to the location specified by hive.querylog.location"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"The interval to wait between logging the plan's progress.\n" +
"If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
"the progress is logged regardless of this value.\n" +
"The actual interval will be the ceiling of (this value divided by the value of\n" +
"hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
"I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
"logged less frequently than specified.\n" +
"This only has an effect if hive.querylog.enable.plan.progress is set to true."),
HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe for transmitting input data to and reading output data from the user scripts. "),
HIVESCRIPTRECORDREADER("hive.script.recordreader",
"org.apache.hadoop.hive.ql.exec.TextRecordReader",
"The default record reader for reading data from the user scripts. "),
HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
"org.apache.hadoop.hive.ql.exec.TextRecordWriter",
"The default record writer for writing data to the user scripts. "),
HIVESCRIPTESCAPE("hive.transform.escape.input", false,
"This adds an option to escape special chars (newlines, carriage returns and\n" +
"tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
"can contain data that contains special characters."),
HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
"Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
"The last record before the end of stream can have less than hive.binary.record.max.length bytes"),
HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
//small table file size
HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
"The threshold for the input file size of the small tables; if the file size is smaller \n" +
"than this threshold, it will try to convert the common join into map join"),
HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true,
"Use schema evolution to convert self-describing file format's data to the schema desired by the reader."),
HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION("orc.force.positional.evolution", true,
"Whether to use column position based schema evolution or not (as opposed to column name based evolution)"),
/** Don't use this directly - use AcidUtils! */
HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
"internal usage only -- do transaction (ACID or insert-only) table scan.", true),
HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 10000000,
"Vectorized ACID readers can often load all the delete events from all the delete deltas\n"
+ "into memory to optimize for performance. To prevent out-of-memory errors, this is a rough heuristic\n"
+ "that limits the total number of delete events that can be loaded into memory at once.\n"
+ "Roughly it has been set to 10 million delete events per bucket (~160 MB).\n"),
FILTER_DELETE_EVENTS("hive.txn.filter.delete.events", true,
"If true, VectorizedOrcAcidRowBatchReader will compute min/max " +
"ROW__ID for the split and only load delete events in that range.\n"
),
HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
"A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
// test mode in hive mode
HIVETESTMODE("hive.test.mode", false,
"Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
false),
HIVEEXIMTESTMODE("hive.exim.test.mode", false,
"The subset of test mode that only enables custom path handling for ExIm.", false),
HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
"In test mode, specifies prefixes for the output table", false),
HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
"In test mode, specifies sampling frequency for table, which is not bucketed,\n" +
"For example, the following query:\n" +
" INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
"would be converted to\n" +
" INSERT OVERWRITE TABLE test_dest\n" +
" SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false),
HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
"In test mode, specifies comma separated table names which would not apply sampling", false),
HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false),
HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false),
HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false),
HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false),
TESTMODE_BUCKET_CODEC_VERSION("hive.test.bucketcodec.version", 1,
"For testing only. Will make ACID subsystem write RecordIdentifier.bucketId in specified\n" +
"format", false),
HIVETESTMODEACIDKEYIDXSKIP("hive.test.acid.key.index.skip", false, "For testing only. OrcRecordUpdater will skip "
+ "generation of the hive.acid.key.index", false),
HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
"Merge small files at the end of a map-only job"),
HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
"Merge small files at the end of a map-reduce job"),
HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
"Size of merged files at the end of the job"),
HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
"When the average output file size of a job is less than this number, Hive will start an additional \n" +
"map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
"if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
"When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" +
"while writing a table with ORC file format, enabling this config will do stripe-level\n" +
"fast merge for small ORC files. Note that enabling this config will not honor the\n" +
"padding tolerance config (hive.exec.orc.block.padding.tolerance)."),
HIVE_ORC_CODEC_POOL("hive.use.orc.codec.pool", false,
"Whether to use codec pool in ORC. Disable if there are bugs with codec reuse."),
HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
"If this is set the header for RCFiles will simply be RCF. If this is not\n" +
"set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
"by the input and output RCFile formats."),
HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""),
HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""),
HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M
PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f,
"Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
"It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
"This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", false,
"Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
"on reading parquet files from other tools"),
HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION("hive.avro.timestamp.skip.conversion", false,
"Some older Hive implementations (pre-3.1) wrote Avro timestamps in a UTC-normalized" +
"manner, while from version 3.1 until now Hive wrote time zone agnostic timestamps. " +
"Setting this flag to true will treat legacy timestamps as time zone agnostic. Setting " +
"it to false will treat legacy timestamps as UTC-normalized. This flag will not affect " +
"timestamps written after this change."),
HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false,
"Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" +
"Set this flag to true to interpret the value as seconds to be consistent with float/double." ),
HIVE_ORC_BASE_DELTA_RATIO("hive.exec.orc.base.delta.ratio", 8, "The ratio of base writer and\n" +
"delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED("hive.exec.orc.delta.streaming.optimizations.enabled", false,
"Whether to enable streaming optimizations for ORC delta files. This will disable ORC's internal indexes,\n" +
"disable compression, enable fast encoding and disable dictionary encoding."),
HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"),
"This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" +
" as opposed to query execution (split generation does not read or cache file footers)." +
" ETL strategy is used when spending little more time in split generation is acceptable" +
" (split generation reads and caches file footers). HYBRID chooses between the above strategies" +
" based on heuristics."),
HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE("hive.exec.orc.blob.storage.split.size", 128L * 1024 * 1024,
"When blob storage is used, BI split strategy does not have block locations for splitting orc files.\n" +
"In such cases, split generation will use this config to split orc file"),
HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED("hive.exec.orc.writer.llap.memory.manager.enabled", true,
"Whether orc writers should use llap-aware memory manager. LLAP aware memory manager will use memory\n" +
"per executor instead of entire heap memory when concurrent orc writers are involved. This will let\n" +
"task fragments to use memory within its limit (memory per executor) when performing ETL in LLAP."),
// hive streaming ingest settings
HIVE_STREAMING_AUTO_FLUSH_ENABLED("hive.streaming.auto.flush.enabled", true, "Whether to enable memory \n" +
"monitoring and automatic flushing of open record updaters during streaming ingest. This is an expert level \n" +
"setting and disabling this may have severe performance impact under memory pressure."),
HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD("hive.heap.memory.monitor.usage.threshold", 0.7f,
"Hive streaming does automatic memory management across all open record writers. This threshold will let the \n" +
"memory monitor take an action (flush open files) when heap memory usage exceeded this threshold."),
HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE("hive.streaming.auto.flush.check.interval.size", "100Mb",
new SizeValidator(),
"Hive streaming ingest has auto flush mechanism to flush all open record updaters under memory pressure.\n" +
"When memory usage exceed hive.heap.memory.monitor.default.usage.threshold, the auto-flush mechanism will \n" +
"wait until this size (default 100Mb) of records are ingested before triggering flush."),
HIVE_CLASSLOADER_SHADE_PREFIX("hive.classloader.shade.prefix", "", "During reflective instantiation of a class\n" +
"(input, output formats, serde etc.), when classloader throws ClassNotFoundException, as a fallback this\n" +
"shade prefix will be used before class reference and retried."),
HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
"Whether to enable using file metadata cache in metastore for ORC file footers."),
HIVE_ORC_MS_FOOTER_CACHE_PPD("hive.orc.splits.ms.footer.cache.ppd.enabled", true,
"Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\n" +
"must also be set to true for this to work)."),
HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
"If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
"data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS("hive.orc.splits.directory.batch.ms", 0,
"How long, in ms, to wait to batch input directories for processing during ORC split\n" +
"generation. 0 means process directories individually. This can increase the number of\n" +
"metastore calls if metastore metadata cache is used."),
HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
"Include file ID in splits on file systems that support it."),
HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true,
"Allow synthetic file ID in splits on file systems that don't have a native one."),
HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE("hive.orc.cache.stripe.details.mem.size", "256Mb",
new SizeValidator(), "Maximum size of orc splits cached in the client."),
HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
"How many threads orc should use to create splits in parallel."),
HIVE_ORC_CACHE_USE_SOFT_REFERENCES("hive.orc.cache.use.soft.references", false,
"By default, the cache that ORC input format uses to store orc file footer use hard\n" +
"references for the cached object. Setting this to true can help avoid out of memory\n" +
"issues under memory pressure (in some cases) at the cost of slight unpredictability in\n" +
"overall query performance."),
HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB("hive.io.sarg.cache.max.weight.mb", 10,
"The max weight allowed for the SearchArgument Cache. By default, the cache allows a max-weight of 10MB, " +
"after which entries will be evicted."),
HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
"LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
"'1', and '0' as extended, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
"The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
"boolean literal."),
HIVESKEWJOIN("hive.optimize.skewjoin", false,
"Whether to enable skew join optimization. \n" +
"The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
"processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
"job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
"the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
"map-join."),
HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false,
"Whether to enable dynamically partitioned hash join optimization. \n" +
"This setting is also dependent on enabling hive.auto.convert.join"),
HIVECONVERTJOIN("hive.auto.convert.join", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
"If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
"specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
10000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
"the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false,
"For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
"filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
"Currently, this is not working with vectorization or tez execution engine."),
HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
"Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" +
"we think the key as a skew join key. "),
HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
"Determine the number of map task used in the follow up map join job for a skew join.\n" +
"It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
"Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" +
"the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
"Send a heartbeat after this interval - used by mapjoin and filter operators"),
HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
"When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
"When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
"Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
"Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
"Insert queries are not restricted by this limit."),
HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(),
"The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."),
HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicted number of entries in hashtable for a given join \n" +
"input is larger than this number, the join will not be converted to a mapjoin. \n" +
"The value \"-1\" means no limit."),
XPRODSMALLTABLEROWSTHRESHOLD("hive.xprod.mapjoin.small.table.rows", 1,"Maximum number of rows on build side"
+ " of map join before it switches over to cross product edge"),
HIVECONVERTJOINMAXSHUFFLESIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicted size of the larger input for a given join is greater \n" +
"than this number, the join will not be converted to a dynamically partitioned hash join. \n" +
"The value \"-1\" means no limit."),
HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f,
"Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" +
" of the number of keys is divided by this value. If the value is 0, statistics are not used" +
"and hive.hashtable.initialCapacity is used instead."),
HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
"mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"),
HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" +
"when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
"the local task will abort by itself. It means the data of the small table is too large " +
"to be held in memory. Does not apply to Hive-on-Spark (replaced by " +
"hive.mapjoin.max.gc.time.percentage)"),
HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" +
"If the local task's memory usage is more than this number, the local task will abort by itself. \n" +
"It means the data of the small table is too large to be held in memory. Does not apply to " +
"Hive-on-Spark (replaced by hive.mapjoin.max.gc.time.percentage)"),
HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
"The number means after how many rows processed it needs to check the memory usage"),
HIVEHASHTABLEMAXGCTIMEPERCENTAGE("hive.mapjoin.max.gc.time.percentage", (float) 0.60,
new RangeValidator(0.0f, 1.0f), "This number means how much time (what percentage, " +
"0..1, of wallclock time) the JVM is allowed to spend in garbage collection when running " +
"the local task. If GC time percentage exceeds this number, the local task will abort by " +
"itself. Applies to Hive-on-Spark only"),
HIVEDEBUGLOCALTASK("hive.debug.localtask",false, ""),
HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
"The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat",
"The default input format for tez. Tez groups splits in the AM."),
HIVETEZCONTAINERSIZE("hive.tez.container.size", -1,
"By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."),
HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1,
"By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" +
"This can be used to overwrite."),
HIVETEZJAVAOPTS("hive.tez.java.opts", null,
"By default Tez will use the Java options from map tasks. This can be used to overwrite."),
HIVETEZLOGLEVEL("hive.tez.log.level", "INFO",
"The log level to use for tasks executing as part of the DAG.\n" +
"Used only if hive.tez.java.opts is used to configure Java options."),
HIVETEZHS2USERACCESS("hive.tez.hs2.user.access", true,
"Whether to grant access to the hs2/hive user for queries"),
HIVEQUERYNAME ("hive.query.name", null,
"This named is used by Tez to set the dag name. This name in turn will appear on \n" +
"the Tez UI representing the work that was done. Used by Spark to set the query name, will show up in the\n" +
"Spark UI."),
SYSLOG_INPUT_FORMAT_FILE_PRUNING("hive.syslog.input.format.file.pruning", true,
"Whether syslog input format should prune files based on timestamp (ts) column in sys.logs table."),
SYSLOG_INPUT_FORMAT_FILE_TIME_SLICE("hive.syslog.input.format.file.time.slice", "300s",
new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false),
"Files stored in sys.logs typically are chunked with time interval. For example: depending on the\n" +
"logging library used this represents the flush interval/time slice. \n" +
"If time slice/flust interval is set to 5 minutes, then the expectation is that the filename \n" +
"2019-01-02-10-00_0.log represent time range from 10:00 to 10:05.\n" +
"This time slice should align with the flush interval of the logging library else file pruning may\n" +
"incorrectly prune files leading to incorrect results from sys.logs table."),
HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
"Don't create a reducer for enforcing \n" +
"bucketing/sorting for queries of the form: \n" +
"insert overwrite table T2 select * from T1;\n" +
"where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
"If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
"If the user asked for bucketed map-side join, and it cannot be performed, \n" +
"should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
"not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
"query will fail if hive.enforce.bucketmapjoin is set to true."),
HIVE_ENFORCE_NOT_NULL_CONSTRAINT("hive.constraint.notnull.enforce", true,
"Should \"IS NOT NULL \" constraint be enforced?"),
HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", true,
"Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
HIVE_AUTO_SORTMERGE_JOIN_REDUCE("hive.auto.convert.sortmerge.join.reduce.side", true,
"Whether hive.auto.convert.sortmerge.join (if enabled) should be applied to reduce side."),
HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
"hive.auto.convert.sortmerge.join.bigtable.selection.policy",
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
"The policy to choose the big table for automatic conversion to sort-merge join. \n" +
"By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
". based on position of the table - the leftmost table is selected\n" +
"org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
". based on total size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n" +
". based on average size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\n" +
"New policies can be added in future."),
HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN(
"hive.auto.convert.sortmerge.join.to.mapjoin", false,
"If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \n" +
"this parameter decides whether each table should be tried as a big table, and effectively a map-join should be\n" +
"tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the\n" +
"big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\n" +
"sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted\n" +
"and bucketed table with few files (say 10 files) are being joined with a very small sorter and bucketed table\n" +
"with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" +
"if the complete small table can fit in memory, and a map-join can be performed."),
HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""),
HIVEROWOFFSET("hive.exec.rowoffset", false,
"Whether to provide the row offset virtual column"),
// Optimizer
HIVEOPTINDEXFILTER("hive.optimize.index.filter", true, "Whether to enable automatic use of indexes"),
HIVEOPTPPD("hive.optimize.ppd", true,
"Whether to enable predicate pushdown"),
HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true,
"Whether to enable predicate pushdown through windowing"),
HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true,
"Whether to transitively replicate predicate filters over equijoin conditions."),
HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true,
"During query optimization, filters may be pushed down in the operator tree. \n" +
"If this config is true only pushed down filters remain in the operator tree, \n" +
"and the original filter is removed. If this config is false, the original filter \n" +
"is also left in the operator tree at the original place."),
HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true,
"Whether to transform OR clauses in Filter operators into IN clauses"),
HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 2,
"Minimum number of OR clauses needed to transform into IN clauses"),
HIVECOUNTDISTINCTOPTIMIZER("hive.optimize.countdistinct", true,
"Whether to transform count distinct into two stages"),
HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true,
"Extract partition columns from IN clauses"),
// Constant propagation optimizer
HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),
HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", false,
"Whether to eliminate scans of the tables from which no columns are selected. Note\n" +
"that, when selecting from empty tables with data files, this can produce incorrect\n" +
"results, so it's disabled by default. It works correctly for normal tables."),
    HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations that are guaranteed to not generate any rows"),
HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true,
"Whether to push predicates down to storage handlers"),
HIVEOPTGROUPBY("hive.optimize.groupby", true,
"Whether to enable the bucketed group by from bucketed partitions/tables."),
HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false,
"Whether to try bucket mapjoin"),
HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false,
"Whether to try sorted bucket merge map join"),
HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true,
"Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" +
"This should always be set to true. Since it is a new feature, it has been made configurable."),
HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4,
"Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" +
"That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" +
"The optimization will be automatically disabled if number of reducers would be less than specified value."),
HIVEOPTJOINREDUCEDEDUPLICATION("hive.optimize.joinreducededuplication", true,
"Remove extra shuffle/sorting operations after join algorithm selection has been executed. \n" +
"Currently it only works with Apache Tez. This should always be set to true. \n" +
"Since it is a new feature, it has been made configurable."),
HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false,
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers."),
HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0,
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers.\n" +
"This config has following possible values: \n" +
"\t-1 - This completely disables the optimization. \n" +
"\t1 - This always enable the optimization. \n" +
"\t0 - This makes the optimization a cost based decision. \n" +
"Setting it to any other positive integer will make Hive use this as threshold for number of writers."),
HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."),
HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."),
HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(),
"Probability with which a row will be chosen."),
HIVE_REMOVE_ORDERBY_IN_SUBQUERY("hive.remove.orderby.in.subquery", true,
"If set to true, order/sort by without limit in sub queries will be removed."),
HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this "
+ "optimization rewrites distinct aggregates from a single stage to multi-stage "
+ "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or "
+ "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."),
// whether to optimize union followed by select followed by filesink
// It creates sub-directories in the final output, so should not be turned on in systems
// where MAPREDUCE-1501 is not present
HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false,
"Whether to remove the union and push the operators between union and the filesink above union. \n" +
"This avoids an extra scan of the output by union. This is independently useful for union\n" +
"queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an\n" +
"extra union is inserted.\n" +
"\n" +
"The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\n" +
"If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the\n" +
"number of reducers are few, so the number of files anyway are small. However, with this optimization,\n" +
"we are increasing the number of files possibly by a big margin. So, we merge aggressively."),
HIVEOPTCORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false,
"Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer\n" +
"input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\n" +
"to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limittranspose.reductionpercentage", 1.0f,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\n" +
"size of the outer input of the join or input of the union that we should get in order to apply the rule."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limittranspose.reductiontuples", (long) 0,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\n" +
"number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule."),
HIVE_OPTIMIZE_CONSTRAINTS_JOIN("hive.optimize.constraints.join", true, "Whether to use referential constraints\n" +
"to optimize (remove or transform) join operators"),
HIVE_OPTIMIZE_REDUCE_WITH_STATS("hive.optimize.filter.stats.reduction", false, "Whether to simplify comparison\n" +
"expressions in filter operators using column stats"),
HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false,
"Whether to create a separate plan for skewed keys for the tables in the join.\n" +
"This is based on the skewed keys stored in the metadata. At compile time, the plan is broken\n" +
"into different joins: one for the skewed keys, and the other for the remaining keys. And then,\n" +
"a union is performed for the 2 joins generated above. So unless the same skewed key is present\n" +
"in both the joined tables, the join for the skewed key will be performed as a map-side join.\n" +
"\n" +
"The main difference between this parameter and hive.optimize.skewjoin is that this parameter\n" +
"uses the skew information stored in the metastore to optimize the plan at compile time itself.\n" +
"If there is no skew information in the metadata, this parameter will not have any affect.\n" +
"Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\n" +
"Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing\n" +
"so for backward compatibility.\n" +
"\n" +
"If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
"would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),
HIVE_OPTIMIZE_TOPNKEY("hive.optimize.topnkey", true, "Whether to enable top n key optimizer."),
HIVE_SHARED_WORK_OPTIMIZATION("hive.optimize.shared.work", true,
"Whether to enable shared work optimizer. The optimizer finds scan operator over the same table\n" +
"and follow-up operators in the query plan and merges them if they meet some preconditions. Tez only."),
HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION("hive.optimize.shared.work.extended", true,
"Whether to enable shared work extended optimizer. The optimizer tries to merge equal operators\n" +
"after a work boundary after shared work optimizer has been executed. Requires hive.optimize.shared.work\n" +
"to be set to true. Tez only."),
HIVE_SHARED_WORK_SEMIJOIN_OPTIMIZATION("hive.optimize.shared.work.semijoin", true,
"Whether to enable shared work extended optimizer for semijoins. The optimizer tries to merge\n" +
"scan operators if one of them reads the full table, even if the other one is the target for\n" +
"one or more semijoin edges. Tez only."),
HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE("hive.optimize.shared.work.mapjoin.cache.reuse", true,
"When shared work optimizer is enabled, whether we should reuse the cache for the broadcast side\n" +
"of mapjoin operators that share same broadcast input. Requires hive.optimize.shared.work\n" +
"to be set to true. Tez only."),
HIVE_COMBINE_EQUIVALENT_WORK_OPTIMIZATION("hive.combine.equivalent.work.optimization", true, "Whether to " +
"combine equivalent work objects during physical optimization.\n This optimization looks for equivalent " +
"work objects and combines them if they meet certain preconditions. Spark only."),
HIVE_REMOVE_SQ_COUNT_CHECK("hive.optimize.remove.sq_count_check", true,
"Whether to remove an extra join with sq_count_check for scalar subqueries "
+ "with constant group by keys."),
HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE("hive.optimize.update.table.properties.from.serde", false,
"Whether to update table-properties by initializing tables' SerDe instances during logical-optimization. \n" +
"By doing so, certain SerDe classes (like AvroSerDe) can pre-calculate table-specific information, and \n" +
"store it in table-properties, to be used later in the SerDe, while running the job."),
HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST("hive.optimize.update.table.properties.from.serde.list",
"org.apache.hadoop.hive.serde2.avro.AvroSerDe",
"The comma-separated list of SerDe classes that are considered when enhancing table-properties \n" +
"during logical optimization."),
// CTE
HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1,
"If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" +
"before executing the main query block. -1 will disable this feature."),
// Statistics
HIVE_STATS_ESTIMATE_STATS("hive.stats.estimate", true,
"Estimate statistics in absence of statistics."),
HIVE_STATS_NDV_ESTIMATE_PERC("hive.stats.ndv.estimate.percent", (float)20,
"This many percentage of rows will be estimated as count distinct in absence of statistics."),
HIVE_STATS_NUM_NULLS_ESTIMATE_PERC("hive.stats.num.nulls.estimate.percent", (float)5,
"This many percentage of rows will be estimated as number of nulls in absence of statistics."),
HIVESTATSAUTOGATHER("hive.stats.autogather", true,
"A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", true,
"A flag to gather column statistics automatically."),
HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"),
"The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" +
"each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" +
"after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB
/**
* @deprecated Use MetastoreConf.STATS_DEFAULT_PUBLISHER
*/
@Deprecated
HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "",
"The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
/**
* @deprecated Use MetastoreConf.STATS_DEFAULT_AGGRETATOR
*/
@Deprecated
HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
CLIENT_STATS_COUNTERS("hive.client.stats.counters", "",
"Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \n" +
"Non-display names should be used"),
//Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). Non-display names should be used".
HIVE_STATS_RELIABLE("hive.stats.reliable", false,
"Whether queries will fail because stats cannot be collected completely accurately. \n" +
"If this is set to true, reading/writing from/into a partition may fail because the stats\n" +
"could not be computed accurately."),
HIVE_STATS_COLLECT_PART_LEVEL_STATS("hive.analyze.stmt.collect.partlevel.stats", true,
"analyze table T compute statistics for columns. Queries like these should compute partition"
+ "level stats for partitioned table even when no part spec is specified."),
HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10,
"Number of threads used by noscan analyze command for partitioned tables.\n" +
"This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC)."),
// Collect table access keys information for operators that can benefit from bucketing
HIVE_STATS_COLLECT_TABLEKEYS("hive.stats.collect.tablekeys", false,
"Whether join and group by keys on tables are derived and maintained in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if they should be bucketed."),
// Collect column access information
HIVE_STATS_COLLECT_SCANCOLS("hive.stats.collect.scancols", false,
"Whether column accesses are tracked in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed."),
HIVE_STATS_NDV_ALGO("hive.stats.ndv.algo", "hll", new PatternSet("hll", "fm"),
"hll and fm stand for HyperLogLog and FM-sketch, respectively for computing ndv."),
/**
* @deprecated Use MetastoreConf.STATS_FETCH_BITVECTOR
*/
@Deprecated
HIVE_STATS_FETCH_BITVECTOR("hive.stats.fetch.bitvector", false,
"Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"),
// standard error allowed for ndv estimates for FM-sketch. A lower value indicates higher accuracy and a
// higher compute cost.
HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0,
"Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. \n" +
"A lower value for error indicates higher accuracy and a higher compute cost."),
/**
* @deprecated Use MetastoreConf.STATS_NDV_TUNER
*/
@Deprecated
HIVE_METASTORE_STATS_NDV_TUNER("hive.metastore.stats.ndv.tuner", (float)0.0,
"Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
"The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
"Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
/**
* @deprecated Use MetastoreConf.STATS_NDV_DENSITY_FUNCTION
*/
@Deprecated
HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false,
"Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
// if length of variable length data type cannot be determined this length will be used.
HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics, for variable length columns (like string, bytes etc.), this value will be\n" +
"used. For fixed length columns their corresponding Java equivalent sizes are used\n" +
"(float - 4 bytes, double - 8 bytes etc.)."),
// if number of elements in list cannot be determined, this value will be used
HIVE_STATS_LIST_NUM_ENTRIES("hive.stats.list.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like list, the average number of\n" +
"entries/values can be specified using this config."),
// if number of elements in map cannot be determined, this value will be used
HIVE_STATS_MAP_NUM_ENTRIES("hive.stats.map.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like map, the average number of\n" +
"entries/values can be specified using this config."),
// statistics annotation fetches column statistics for all required columns which can
// be very expensive sometimes
HIVE_STATS_FETCH_COLUMN_STATS("hive.stats.fetch.column.stats", true,
"Annotation of operator tree with statistics information requires column statistics.\n" +
"Column statistics are fetched from metastore. Fetching column statistics for each needed column\n" +
"can be expensive when the number of columns is high. This flag can be used to disable fetching\n" +
"of column statistics from metastore."),
// in the absence of column statistics, the estimated number of rows/data size that will
// be emitted from join operator will depend on this factor
HIVE_STATS_JOIN_FACTOR("hive.stats.join.factor", (float) 1.1,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator\n" +
"uses column statistics to estimate the number of rows flowing out of it and hence the data size.\n" +
"In the absence of column statistics, this factor determines the amount of rows that flows out\n" +
"of JOIN operator."),
HIVE_STATS_CORRELATED_MULTI_KEY_JOINS("hive.stats.correlated.multi.key.joins", true,
"When estimating output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
// in the absence of uncompressed/raw data size, total file size will be used for statistics
// annotation. But the file may be compressed, encoded and serialized which may be lesser in size
// than the actual uncompressed/raw data size. This factor will be multiplied to file size to estimate
// the raw data size.
HIVE_STATS_DESERIALIZATION_FACTOR("hive.stats.deserialization.factor", (float) 10.0,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence\n" +
"of basic statistics like number of rows and data size, file size is used to estimate the number\n" +
"of rows and data size. Since files in tables/partitions are serialized (and optionally\n" +
"compressed) the estimates of number of rows and data size cannot be reliably determined.\n" +
"This factor is multiplied with the file size to account for serialization and compression."),
HIVE_STATS_IN_CLAUSE_FACTOR("hive.stats.filter.in.factor", (float) 1.0,
"Currently column distribution is assumed to be uniform. This can lead to overestimation/underestimation\n" +
"in the number of rows filtered by a certain operator, which in turn might lead to overprovision or\n" +
"underprovision of resources. This factor is applied to the cardinality estimation of IN clauses in\n" +
"filter operators."),
HIVE_STATS_IN_MIN_RATIO("hive.stats.filter.in.min.ratio", (float) 0.0f,
"Output estimation of an IN filter can't be lower than this ratio"),
HIVE_STATS_UDTF_FACTOR("hive.stats.udtf.factor", (float) 1.0,
"UDTFs change the number of rows of the output. A common UDTF is the explode() method that creates\n" +
"multiple rows for each element in the input array. This factor is applied to the number of\n" +
"output rows and output size."),
// Concurrency
HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false,
"Whether Hive supports concurrency control or not. \n" +
"A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
HIVE_LOCK_MANAGER("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", ""),
HIVE_LOCK_NUMRETRIES("hive.lock.numretries", 100,
"The number of times you want to try to get all the locks"),
HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10,
"The number of times you want to retry to do one unlock"),
HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", "60s",
new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false),
"The maximum sleep time between various retries"),
HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false,
"This param is to control whether or not only do lock on queries\n" +
"that need to execute at least one mapred job."),
HIVE_LOCK_QUERY_STRING_MAX_LENGTH("hive.lock.query.string.max.length", 1000000,
"The maximum length of the query string to store in the lock.\n" +
"The default value is 1000000, since the data limit of a znode is 1MB"),
HIVE_MM_ALLOW_ORIGINALS("hive.mm.allow.originals", false,
"Whether to allow original files in MM tables. Conversion to MM may be expensive if\n" +
"this is set to false, however unless MAPREDUCE-7086 fix is present, queries that\n" +
"read MM tables with original files will fail. The default in Hive 3.0 is false."),
// Zookeeper related configs
HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", "",
"List of ZooKeeper servers to talk to. This is needed for: \n" +
"1. Read/write locks - when hive.lock.manager is set to \n" +
"org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager, \n" +
"2. When HiveServer2 supports service discovery via Zookeeper.\n" +
"3. For delegation token storage if zookeeper store is used, if\n" +
"hive.cluster.delegation.token.store.zookeeper.connectString is not set\n" +
"4. LLAP daemon registry service\n" +
"5. Leader selection for privilege synchronizer"),
HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181",
"The port of ZooKeeper servers to talk to.\n" +
"If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
"does not contain port numbers, this value is used."),
HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "1200000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" +
"if a heartbeat is not sent in the timeout."),
HIVE_ZOOKEEPER_CONNECTION_TIMEOUT("hive.zookeeper.connection.timeout", "15s",
new TimeValidator(TimeUnit.SECONDS),
"ZooKeeper client's connection timeout in seconds. Connection timeout * hive.zookeeper.connection.max.retries\n" +
"with exponential backoff is when curator client deems connection is lost to zookeeper."),
HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace",
"The parent node under which all ZooKeeper nodes are created."),
HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false,
"Clean extra nodes at the end of the session."),
HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3,
"Max number of times to retry when connecting to the ZooKeeper server."),
HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Initial amount of time (in milliseconds) to wait between retries\n" +
"when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."),
// Transactions
HIVE_TXN_MANAGER("hive.txn.manager",
"org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
"Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" +
"transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" +
"hive.compactor.worker.threads, hive.support.concurrency (true),\n" +
"and hive.exec.dynamic.partition.mode (nonstrict).\n" +
"The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" +
"no transactions."),
HIVE_TXN_STRICT_LOCKING_MODE("hive.txn.strict.locking.mode", true, "In strict mode non-ACID\n" +
"resources use standard R/W lock semantics, e.g. INSERT will acquire exclusive lock.\n" +
"In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
"allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
"DROP TABLE etc. when the table is being written to"),
TXN_OVERWRITE_X_LOCK("hive.txn.xlock.iow", true,
"Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for\n" +
"transactional tables. This ensures that inserts (w/o overwrite) running concurrently\n" +
"are not hidden by the INSERT OVERWRITE."),
HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", true,
"Whether Hive supports transactional stats (accurate stats for transactional tables)"),
/**
* @deprecated Use MetastoreConf.TXN_TIMEOUT
*/
@Deprecated
HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS),
"time after which transactions are declared aborted if the client has not sent a heartbeat."),
/**
* @deprecated Use MetastoreConf.TXN_HEARTBEAT_THREADPOOL_SIZE
*/
@Deprecated
HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE("hive.txn.heartbeat.threadpool.size", 5, "The number of " +
"threads to use for heartbeating. For Hive CLI, 1 is enough. For HiveServer2, we need a few"),
TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT("hive.txn.manager.dump.lock.state.on.acquire.timeout", false,
"Set this to true so that when attempt to acquire a lock on resource times out, the current state" +
" of the lock manager is dumped to log file. This is for debugging. See also " +
"hive.lock.numretries and hive.lock.sleep.between.retries."),
HIVE_TXN_OPERATIONAL_PROPERTIES("hive.txn.operational.properties", 1,
"1: Enable split-update feature found in the newer version of Hive ACID subsystem\n" +
"4: Make the table 'quarter-acid' as it only supports insert. But it doesn't require ORC or bucketing.\n" +
"This is intended to be used as an internal property for future versions of ACID. (See\n" +
"HIVE-14035 for details. User sets it tblproperites via transactional_properties.)", true),
/**
* @deprecated Use MetastoreConf.MAX_OPEN_TXNS
*/
@Deprecated
HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" +
"current open transactions reach this limit, future open transaction requests will be \n" +
"rejected, until this number goes below the limit."),
/**
* @deprecated Use MetastoreConf.COUNT_OPEN_TXNS_INTERVAL
*/
@Deprecated
HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s",
new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks to count open transactions."),
/**
* @deprecated Use MetastoreConf.TXN_MAX_OPEN_BATCH
*/
@Deprecated
HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
"Maximum number of transactions that can be fetched in one call to open_txns().\n" +
"This controls how many transactions streaming agents such as Flume or Storm open\n" +
"simultaneously. The streaming agent then writes that number of entries into a single\n" +
"file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" +
"of delta files created by streaming agents. But it also increases the number of open\n" +
"transactions that Hive has to track at any given time, which may negatively affect\n" +
"read performance."),
/**
* @deprecated Use MetastoreConf.TXN_RETRYABLE_SQLEX_REGEX
*/
@Deprecated
HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
"of regular expression patterns for SQL state, error code, and error message of\n" +
"retryable SQLExceptions, that's suitable for the metastore DB.\n" +
"For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
"The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
"ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
/**
* @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_ON
*/
@Deprecated
HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
"Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
"Set this to true on one instance of the Thrift metastore service as part of turning\n" +
"on Hive transactions. For a complete list of parameters required for turning on\n" +
"transactions, see hive.txn.manager."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_WORKER_THREADS
*/
@Deprecated
HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0,
"How many compactor worker threads to run on this metastore instance. Set this to a\n" +
"positive number on one or more instances of the Thrift metastore service as part of\n" +
"turning on Hive transactions. For a complete list of parameters required for turning\n" +
"on transactions, see hive.txn.manager.\n" +
"Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" +
"themselves. Increasing the number of worker threads will decrease the time it takes\n" +
"tables or partitions to be compacted once they are determined to need compaction.\n" +
"It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" +
"will be running in the background."),
HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds after which a compaction job will be declared failed and the\n" +
"compaction re-queued."),
HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", "300s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds between checks to see if any tables or partitions need to be\n" +
"compacted. This should be kept high because each check for compaction requires\n" +
"many calls against the NameNode.\n" +
"Decreasing this value will reduce the time it takes for compaction to be started\n" +
"for a table or partition that requires compaction. However, checking if compaction\n" +
"is needed requires several calls to the NameNode for each table or partition that\n" +
"has had a transaction done on it since the last major compaction. So decreasing this\n" +
"value will increase the load on the NameNode."),
HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10,
"Number of delta directories in a table or partition that will trigger a minor\n" +
"compaction."),
HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f,
"Percentage (fractional) size of the delta files relative to the base that will trigger\n" +
"a major compaction. (1.0 = 100%, so the default 0.1 = 10%.)"),
COMPACTOR_MAX_NUM_DELTA("hive.compactor.max.num.delta", 500, "Maximum number of delta files that " +
"the compactor will attempt to handle in a single job."),
HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000,
"Number of aborted transactions involving a given table or partition that will trigger\n" +
"a major compaction."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_FAILED_THRESHOLD
*/
@Deprecated
COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2,
new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
"after which automatic compactions will not be scheduled any more. Note that this must be less " +
"than hive.compactor.history.retention.failed."),
HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"),
COMPACTOR_JOB_QUEUE("hive.compactor.job.queue", "", "Used to specify name of Hadoop queue to which\n" +
"Compaction jobs will be submitted. Set to empty string to let Hadoop choose the queue."),
TRANSACTIONAL_CONCATENATE_NOBLOCK("hive.transactional.concatenate.noblock", false,
"Will cause 'alter table T concatenate' to be non-blocking"),
HIVE_COMPACTOR_COMPACT_MM("hive.compactor.compact.insert.only", true,
"Whether the compactor should compact insert-only tables. A safety switch."),
COMPACTOR_CRUD_QUERY_BASED("hive.compactor.crud.query.based", false,
"Means Major compaction on full CRUD tables is done as a query, "
+ "and minor compaction will be disabled."),
SPLIT_GROUPING_MODE("hive.split.grouping.mode", "query", new StringSet("query", "compactor"),
"This is set to compactor from within the query based compactor. This enables the Tez SplitGrouper "
+ "to group splits based on their bucket number, so that all rows from different bucket files "
+ " for the same bucket number can end up in the same bucket file after the compaction."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_SUCCEEDED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3,
new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_FAILED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3,
new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_ATTEMPTED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2,
new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_REAPER_INTERVAL
*/
@Deprecated
COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m",
new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often compaction history reaper runs"),
/**
* @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_START
*/
@Deprecated
HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"),
/**
* @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_INTERVAL
*/
@Deprecated
HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time interval describing how often the reaper runs"),
/**
* @deprecated Use MetastoreConf.WRITE_SET_REAPER_INTERVAL
*/
@Deprecated
WRITE_SET_REAPER_INTERVAL("hive.writeset.reaper.interval", "60s",
new TimeValidator(TimeUnit.MILLISECONDS), "Frequency of WriteSet reaper runs"),
MERGE_CARDINALITY_VIOLATION_CHECK("hive.merge.cardinality.check", true,
"Set to true to ensure that each SQL Merge statement ensures that for each row in the target\n" +
"table there is at most 1 matching row in the source table per SQL Specification."),
MERGE_SPLIT_UPDATE("hive.merge.split.update", false,
"If true, SQL Merge statement will handle WHEN MATCHED UPDATE by splitting it into 2\n" +
"branches of a multi-insert, representing delete of existing row and an insert of\n" +
"the new version of the row. Updating bucketing and partitioning columns should\n" +
"only be permitted if this is true."),
OPTIMIZE_ACID_META_COLUMNS("hive.optimize.acid.meta.columns", true,
"If true, don't decode Acid metadata columns from storage unless" +
" they are needed."),
// For Arrow SerDe
HIVE_ARROW_ROOT_ALLOCATOR_LIMIT("hive.arrow.root.allocator.limit", Long.MAX_VALUE,
"Arrow root allocator memory size limitation in bytes."),
HIVE_ARROW_BATCH_ALLOCATOR_LIMIT("hive.arrow.batch.allocator.limit", 10_000_000_000L,
"Max bytes per arrow batch. This is a threshold, the memory is not pre-allocated."),
HIVE_ARROW_BATCH_SIZE("hive.arrow.batch.size", 1000, "The number of rows sent in one Arrow batch."),
// For Druid storage handler
HIVE_DRUID_INDEXING_GRANULARITY("hive.druid.indexer.segments.granularity", "DAY",
new PatternSet("YEAR", "MONTH", "WEEK", "DAY", "HOUR", "MINUTE", "SECOND"),
"Granularity for the segments created by the Druid storage handler"
),
HIVE_DRUID_MAX_PARTITION_SIZE("hive.druid.indexer.partition.size.max", 5000000,
"Maximum number of records per segment partition"
),
HIVE_DRUID_MAX_ROW_IN_MEMORY("hive.druid.indexer.memory.rownum.max", 75000,
"Maximum number of records in memory while storing data in Druid"
),
HIVE_DRUID_BROKER_DEFAULT_ADDRESS("hive.druid.broker.address.default", "localhost:8082",
"Address of the Druid broker. If we are querying Druid from Hive, this address needs to be\n"
+
"declared"
),
HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS("hive.druid.coordinator.address.default", "localhost:8081",
"Address of the Druid coordinator. It is used to check the load status of newly created segments"
),
HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS("hive.druid.overlord.address.default", "localhost:8090",
"Address of the Druid overlord. It is used to submit indexing tasks to druid."
),
HIVE_DRUID_SELECT_THRESHOLD("hive.druid.select.threshold", 10000,
"Takes only effect when hive.druid.select.distribute is set to false. \n" +
"When we can split a Select query, this is the maximum number of rows that we try to retrieve\n" +
"per query. In order to do that, we obtain the estimated size for the complete result. If the\n" +
"number of records of the query results is larger than this threshold, we split the query in\n" +
"total number of rows/threshold parts across the time dimension. Note that we assume the\n" +
"records to be split uniformly across the time dimension."),
HIVE_DRUID_NUM_HTTP_CONNECTION("hive.druid.http.numConnection", 20, "Number of connections used by\n" +
"the HTTP client."),
HIVE_DRUID_HTTP_READ_TIMEOUT("hive.druid.http.read.timeout", "PT1M", "Read timeout period for the HTTP\n" +
"client in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 1 minute."),
HIVE_DRUID_SLEEP_TIME("hive.druid.sleep.time", "PT10S",
"Sleep time between retries in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 10 seconds."
),
HIVE_DRUID_BASE_PERSIST_DIRECTORY("hive.druid.basePersistDirectory", "",
"Local temporary directory used to persist intermediate indexing state, will default to JVM system property java.io.tmpdir."
),
HIVE_DRUID_ROLLUP("hive.druid.rollup", true, "Whether to rollup druid rows or not."),
DRUID_SEGMENT_DIRECTORY("hive.druid.storage.storageDirectory", "/druid/segments"
, "druid deep storage location."),
DRUID_METADATA_BASE("hive.druid.metadata.base", "druid", "Default prefix for metadata tables"),
DRUID_METADATA_DB_TYPE("hive.druid.metadata.db.type", "mysql",
new PatternSet("mysql", "postgresql", "derby"), "Type of the metadata database."
),
DRUID_METADATA_DB_USERNAME("hive.druid.metadata.username", "",
"Username to connect to Type of the metadata DB."
),
DRUID_METADATA_DB_PASSWORD("hive.druid.metadata.password", "",
"Password to connect to Type of the metadata DB."
),
DRUID_METADATA_DB_URI("hive.druid.metadata.uri", "",
"URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)."
),
DRUID_WORKING_DIR("hive.druid.working.directory", "/tmp/workingDirectory",
"Default hdfs working directory used to store some intermediate metadata"
),
HIVE_DRUID_MAX_TRIES("hive.druid.maxTries", 5, "Maximum number of retries before giving up"),
HIVE_DRUID_PASSIVE_WAIT_TIME("hive.druid.passiveWaitTimeMs", 30000L,
"Wait time in ms default to 30 seconds."
),
HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm use to encode the bitmaps"),
// For HBase storage handler
HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
"Whether writes to HBase should be forced to the write-ahead log. \n" +
"Disabling this improves HBase write performance at the risk of lost writes in case of a crash."),
HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false,
"True when HBaseStorageHandler should generate hfiles instead of operate against the online table."),
HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."),
HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " +
"restore the HBase table snapshot."),
// For har files
HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"),
"Some select queries can be converted to single FETCH task minimizing latency.\n" +
"Currently the query should be single sourced not having any subquery and should not have\n" +
"any aggregations or distincts (which incurs RS), lateral views and joins.\n" +
"0. none : disable hive.fetch.task.conversion\n" +
"1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" +
"2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)"
),
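    // Illustrative examples for hive.fetch.task.conversion=more (table/column names hypothetical):
    //   SELECT col1 FROM t WHERE ds = '2020-01-01' LIMIT 10;   -- can typically run as a single FETCH task
    //   SELECT count(*) FROM t;                                -- has an aggregation, so it is not converted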
HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 1073741824L,
"Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" +
"is calculated by summation of file lengths. If it's not native, storage handler for the table\n" +
"can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."),
HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false,
"Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" +
"final aggregations in single reduce task. If this is set true, Hive delegates final aggregation\n" +
"stage to fetch task, possibly decreasing the query time."),
HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true,
"When set to true Hive will answer a few queries like count(1) purely using stats\n" +
"stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" +
"For more advanced stats collection need to run analyze table queries."),
// Serde for FetchTask
HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe",
"The SerDe used by FetchTask to serialize the fetch output."),
HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true,
"If true, the evaluation result of a deterministic expression referenced twice or more\n" +
"will be cached.\n" +
"For example, in a filter condition like '.. where key + 10 = 100 or key + 10 = 0'\n" +
"the expression 'key + 10' will be evaluated/cached once and reused for the following\n" +
"expression ('key + 10 = 0'). Currently, this is applied only to expressions in select\n" +
"or filter operators."),
// Hive Variables
HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true,
"This enables substitution using syntax like ${var} ${system:var} and ${env:var}."),
HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40,
"The maximum replacements the substitution engine will do."),
HIVECONFVALIDATION("hive.conf.validation", true,
"Enables type checking for registered Hive configurations"),
SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""),
HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE(
"hive.test.authz.sstd.hs2.mode", false, "test hs2 mode from .q tests", true),
HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false,
"enable or disable the Hive client authorization"),
HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME("hive.security.authorization.kerberos.use.shortname", true,
"use short name in Kerberos cluster"),
HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
"The Hive client authorization manager class name. The user defined authorization class should implement \n" +
"interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator",
"hive client authenticator manager class name. The user defined authenticator should implement \n" +
"interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider",
"Names of authorization manager classes (comma separated) to be used in the metastore\n" +
"for authorization. The user defined authorization class should implement interface\n" +
"org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.\n" +
"All authorization manager classes have to successfully authorize the metastore API\n" +
"call for the command execution to be allowed."),
HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", true,
"If this is true, metastore authorizer authorizes read actions on database, table"),
HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
"authenticator manager class name to be used in the metastore for authentication. \n" +
"The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", "",
"the privileges automatically granted to some users whenever a table gets created.\n" +
"An example like \"userX,userY:select;userZ:create\" will grant select privilege to userX and userY,\n" +
"and grant create privilege to userZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants",
"",
"the privileges automatically granted to some groups whenever a table gets created.\n" +
"An example like \"groupX,groupY:select;groupZ:create\" will grant select privilege to groupX and groupY,\n" +
"and grant create privilege to groupZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.createtable.role.grants", "",
"the privileges automatically granted to some roles whenever a table gets created.\n" +
"An example like \"roleX,roleY:select;roleZ:create\" will grant select privilege to roleX and roleY,\n" +
"and grant create privilege to roleZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS("hive.security.authorization.createtable.owner.grants",
"",
"The privileges automatically granted to the owner whenever a table gets created.\n" +
"An example like \"select,drop\" will grant select and drop privilege to the owner\n" +
"of the table. Note that the default gives the creator of a table no access to the\n" +
"table (but see HIVE-8067)."),
HIVE_AUTHORIZATION_TASK_FACTORY("hive.security.authorization.task.factory",
"org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl",
"Authorization DDL task factory implementation"),
// if this is not set default value is set during config initialization
// Default value can't be set in this constructor as it would refer names in other ConfVars
// whose constructor would not have been called
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST(
"hive.security.authorization.sqlstd.confwhitelist", "",
"A Java regex. Configurations parameters that match this\n" +
"regex can be modified by user when SQL standard authorization is enabled.\n" +
"To get the default value, use the 'set <param>' command.\n" +
"Note that the hive.conf.restricted.list checks are still enforced after the white list\n" +
"check"),
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND(
"hive.security.authorization.sqlstd.confwhitelist.append", "",
"2nd Java regex that it would match in addition to\n" +
"hive.security.authorization.sqlstd.confwhitelist.\n" +
"Do not include a starting \"|\" in the value. Using this regex instead\n" +
"of updating the original regex means that you can append to the default\n" +
"set by SQL standard authorization instead of replacing it entirely."),
HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."),
HIVE_CLI_PRINT_ESCAPE_CRLF("hive.cli.print.escape.crlf", false,
"Whether to print carriage returns and line feeds in row output as escaped \\r and \\n"),
HIVE_CLI_TEZ_SESSION_ASYNC("hive.cli.tez.session.async", true, "Whether to start Tez\n" +
"session in background when running CLI with Tez, allowing CLI to be available earlier."),
HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS("hive.disable.unsafe.external.table.operations", true,
"Whether to disable certain optimizations and operations on external tables," +
" on the assumption that data changes by external applications may have negative effects" +
" on these operations."),
HIVE_STRICT_MANAGED_TABLES("hive.strict.managed.tables", false,
"Whether strict managed tables mode is enabled. With this mode enabled, " +
"only transactional tables (both full and insert-only) are allowed to be created as managed tables"),
HIVE_EXTERNALTABLE_PURGE_DEFAULT("hive.external.table.purge.default", false,
"Set to true to set external.table.purge=true on newly created external tables," +
" which will specify that the table data should be deleted when the table is dropped." +
" Set to false maintain existing behavior that external tables do not delete data" +
" when the table is dropped."),
HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,
"Whether to throw an exception if dynamic partition insert generates empty results."),
HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a,gs",
"A comma separated list of acceptable URI schemes for import and export."),
// temporary variable for testing. This is added just to turn off this feature in case of a bug in
// deployment. It has not been documented in hive-default.xml intentionally, this should be removed
// once the feature is stable
HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES("hive.exim.strict.repl.tables",true,
"Parameter that determines if 'regular' (non-replication) export dumps can be\n" +
"imported on to tables that are the target of replication. If this parameter is\n" +
"set, regular imports will check if the destination table(if it exists) has a " +
"'repl.last.id' set on it. If so, it will fail."),
HIVE_REPL_TASK_FACTORY("hive.repl.task.factory",
"org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory",
"Parameter that can be used to override which ReplicationTaskFactory will be\n" +
"used to instantiate ReplicationTask events. Override for third party repl plugins"),
HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""),
HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
"should rework the mapred work or not.\n" +
"This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."),
HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "",
"A list of io exception handler class names. This is used\n" +
"to construct a list exception handlers to handle exceptions thrown\n" +
"by record readers"),
// logging configuration
HIVE_LOG4J_FILE("hive.log4j.file", "",
"Hive log4j configuration file.\n" +
"If the property is not set, then logging will be initialized using hive-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", "",
"Hive log4j configuration file for execution mode(sub command).\n" +
"If the property is not set, then logging will be initialized using hive-exec-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_ASYNC_LOG_ENABLED("hive.async.log.enabled", true,
"Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
" significant performance improvement as logging will be handled in separate thread\n" +
" that uses LMAX disruptor queue for buffering log messages.\n" +
" Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n" +
" drawbacks."),
HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false,
"Whether to log explain output for every query.\n"
+ "When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."),
HIVE_EXPLAIN_USER("hive.explain.user", true,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Tez only."),
HIVE_SPARK_EXPLAIN_USER("hive.spark.explain.user", false,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Spark only."),
HIVE_SPARK_LOG_EXPLAIN_WEBUI("hive.spark.log.explain.webui", true, "Whether to show the " +
"explain plan in the Spark Web UI. Only shows the regular EXPLAIN plan, and ignores " +
"any extra EXPLAIN configuration (e.g. hive.spark.explain.user, etc.). The explain " +
"plan for each stage is truncated at 100,000 characters."),
    // prefix used to auto generate column aliases (this should be started with '_')
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
"String used as a prefix when auto generating column alias.\n" +
"By default the prefix label will be appended with a column position number to form the column alias. \n" +
"Auto generation would happen if an aggregate function is used in a select clause without an explicit alias."),
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME(
"hive.autogen.columnalias.prefix.includefuncname", false,
"Whether to include function name in the column alias auto generated by Hive."),
HIVE_METRICS_CLASS("hive.service.metrics.class",
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
new StringSet(
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
"org.apache.hadoop.hive.common.metrics.LegacyMetrics"),
"Hive metrics subsystem implementation class."),
HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
"org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter",
"Comma separated list of reporter implementation classes for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
+ "HIVE_METRICS_REPORTER conf if present"),
@Deprecated
HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "",
"Reporter implementations for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
"Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuraiton will be"
+ " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. " +
"Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json",
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. " +
"This file will get overwritten at every interval."),
HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"the frequency of updating JSON metrics file."),
HIVE_METRICS_HADOOP2_INTERVAL("hive.service.metrics.hadoop2.frequency", "30s",
new TimeValidator(TimeUnit.SECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter, " +
"the frequency of updating the HADOOP2 metrics system."),
HIVE_METRICS_HADOOP2_COMPONENT_NAME("hive.service.metrics.hadoop2.component",
"hive",
"Component name to provide to Hadoop2 Metrics system. Ideally 'hivemetastore' for the MetaStore " +
" and and 'hiveserver2' for HiveServer2."
),
HIVE_PERF_LOGGER("hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger",
"The class responsible for logging client side performance metrics. \n" +
"Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger"),
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false,
"To cleanup the Hive scratchdir when starting the Hive Server"),
HIVE_SCRATCH_DIR_LOCK("hive.scratchdir.lock", false,
"To hold a lock file in scratchdir to prevent to be removed by cleardanglingscratchdir"),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
"Where to insert into multilevel directories like\n" +
"\"insert directory '/HIVEFT25686/chinna/' from table\""),
HIVE_CTAS_EXTERNAL_TABLES("hive.ctas.external.tables", true,
"whether CTAS for external tables is allowed"),
HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
"whether insert into external tables is allowed"),
HIVE_TEMPORARY_TABLE_STORAGE(
"hive.exec.temporary.table.storage", "default", new StringSet("memory",
"ssd", "default"), "Define the storage policy for temporary tables." +
"Choices between memory, ssd and default"),
HIVE_QUERY_LIFETIME_HOOKS("hive.query.lifetime.hooks", "",
"A comma separated list of hooks which implement QueryLifeTimeHook. These will be triggered" +
" before/after query compilation and before/after query execution, in the order specified." +
"Implementations of QueryLifeTimeHookWithParseHooks can also be specified in this list. If they are" +
"specified then they will be invoked in the same places as QueryLifeTimeHooks and will be invoked during pre " +
"and post query parsing"),
HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
"A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +
"and end of Driver.run, these will be run in the order specified."),
HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null,
"The data format to use for DDL output. One of \"text\" (for human\n" +
"readable text) or \"json\" (for a json object)."),
HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@",
"Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname"),
HIVE_CAPTURE_TRANSFORM_ENTITY("hive.entity.capture.transform", false,
"Compiler to capture transform URI referred in the query"),
HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true,
"In older Hive version (0.10 and earlier) no distinction was made between\n" +
"partition columns or non-partition columns while displaying columns in describe\n" +
"table. From 0.12 onwards, they are displayed separately. This flag will let you\n" +
"get old behavior, if desired. See, test-case in patch for HIVE-6689."),
HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3",
"SSL Versions to disable for all Hive Servers"),
HIVE_PRIVILEGE_SYNCHRONIZER("hive.privilege.synchronizer", true,
"Whether to synchronize privileges from external authorizer periodically in HS2"),
HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL("hive.privilege.synchronizer.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to synchronize privileges from external authorizer periodically in HS2"),
// HiveServer2 specific configs
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR("hive.server2.clear.dangling.scratchdir", false,
"Clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL("hive.server2.clear.dangling.scratchdir.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS("hive.server2.sleep.interval.between.start.attempts",
"60s", new TimeValidator(TimeUnit.MILLISECONDS, 0l, true, Long.MAX_VALUE, true),
"Amount of time to sleep between HiveServer2 start attempts. Primarily meant for tests"),
HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null),
"Number of times HiveServer2 will attempt to start before exiting. The sleep interval between retries" +
" is determined by " + ConfVars.HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS.varname +
"\n The default of 30 will keep trying for 30 minutes."),
HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY("hive.server2.support.dynamic.service.discovery", false,
"Whether HiveServer2 supports dynamic service discovery for its clients. " +
"To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself, " +
"when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: " +
"hive.zookeeper.quorum in their connection string."),
HIVE_SERVER2_ZOOKEEPER_NAMESPACE("hive.server2.zookeeper.namespace", "hiveserver2",
"The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery."),
HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS("hive.server2.zookeeper.publish.configs", true,
"Whether we should publish HiveServer2's configs to ZooKeeper."),
// HiveServer2 global init file location
HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}",
"Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \n" +
"property is set, the value must be a valid path to an init file or directory where the init file is located."),
HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"),
"Transport mode of HiveServer2."),
HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "",
"Bind host on which to run the HiveServer2 Thrift service."),
HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" +
"enable parallel compilation of the queries between sessions and within the same session on HiveServer2. The default is false."),
HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT("hive.driver.parallel.compilation.global.limit", -1, "Determines the " +
"degree of parallelism for queries compilation between sessions on HiveServer2. The default is -1."),
HIVE_SERVER2_COMPILE_LOCK_TIMEOUT("hive.server2.compile.lock.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds a request will wait to acquire the compile lock before giving up. " +
"Setting it to 0s disables the timeout."),
HIVE_SERVER2_PARALLEL_OPS_IN_SESSION("hive.server2.parallel.ops.in.session", true,
"Whether to allow several parallel operations (such as SQL statements) in one session."),
HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL("hive.server2.materializedviews.registry.impl", "DEFAULT",
new StringSet("DEFAULT", "DUMMY"),
"The implementation that we should use for the materialized views registry. \n" +
" DEFAULT: Default cache for materialized views\n" +
" DUMMY: Do not cache materialized views and hence forward requests to metastore"),
// HiveServer2 WebUI
HIVE_SERVER2_WEBUI_BIND_HOST("hive.server2.webui.host", "0.0.0.0", "The host address the HiveServer2 WebUI will listen on"),
HIVE_SERVER2_WEBUI_PORT("hive.server2.webui.port", 10002, "The port the HiveServer2 WebUI will listen on. This can be"
+ "set to 0 or a negative integer to disable the web UI"),
HIVE_SERVER2_WEBUI_MAX_THREADS("hive.server2.webui.max.threads", 50, "The max HiveServer2 WebUI threads"),
HIVE_SERVER2_WEBUI_USE_SSL("hive.server2.webui.use.ssl", false,
"Set this to true for using SSL encryption for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH("hive.server2.webui.keystore.path", "",
"SSL certificate keystore location for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD("hive.server2.webui.keystore.password", "",
"SSL certificate keystore password for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_USE_SPNEGO("hive.server2.webui.use.spnego", false,
"If true, the HiveServer2 WebUI will be secured with SPNEGO. Clients must authenticate with Kerberos."),
HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB("hive.server2.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the HiveServer2 WebUI SPNEGO service principal."),
HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL("hive.server2.webui.spnego.principal",
"HTTP/[email protected]", "The HiveServer2 WebUI SPNEGO service principal.\n" +
"The special string _HOST will be replaced automatically with \n" +
"the value of hive.server2.webui.host or the correct host name."),
HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES("hive.server2.webui.max.historic.queries", 25,
"The maximum number of past queries to show in HiverSever2 WebUI."),
HIVE_SERVER2_WEBUI_USE_PAM("hive.server2.webui.use.pam", false,
"If true, the HiveServer2 WebUI will be secured with PAM."),
HIVE_SERVER2_WEBUI_EXPLAIN_OUTPUT("hive.server2.webui.explain.output", false,
"When set to true, the EXPLAIN output for every query is displayed"
+ " in the HS2 WebUI / Drilldown / Query Plan tab.\n"),
HIVE_SERVER2_WEBUI_SHOW_GRAPH("hive.server2.webui.show.graph", false,
"Set this to true to to display query plan as a graph instead of text in the WebUI. " +
"Only works with hive.server2.webui.explain.output set to true."),
HIVE_SERVER2_WEBUI_MAX_GRAPH_SIZE("hive.server2.webui.max.graph.size", 25,
"Max number of stages graph can display. If number of stages exceeds this, no query" +
"plan will be shown. Only works when hive.server2.webui.show.graph and " +
"hive.server2.webui.explain.output set to true."),
HIVE_SERVER2_WEBUI_SHOW_STATS("hive.server2.webui.show.stats", false,
"Set this to true to to display statistics for MapReduce tasks in the WebUI. " +
"Only works when hive.server2.webui.show.graph and hive.server2.webui.explain.output " +
"set to true."),
HIVE_SERVER2_WEBUI_ENABLE_CORS("hive.server2.webui.enable.cors", false,
"Whether to enable cross origin requests (CORS)\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS("hive.server2.webui.cors.allowed.origins", "*",
"Comma separated list of origins that are allowed when CORS is enabled.\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS("hive.server2.webui.cors.allowed.methods", "GET,POST,DELETE,HEAD",
"Comma separated list of http methods that are allowed when CORS is enabled.\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS("hive.server2.webui.cors.allowed.headers",
"X-Requested-With,Content-Type,Accept,Origin",
"Comma separated list of http headers that are allowed when CORS is enabled.\n"),
// Tez session settings
HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE("hive.server2.active.passive.ha.enable", false,
"Whether HiveServer2 Active/Passive High Availability be enabled when Hive Interactive sessions are enabled." +
"This will also require hive.server2.support.dynamic.service.discovery to be enabled."),
HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE("hive.server2.active.passive.ha.registry.namespace",
"hs2ActivePassiveHA",
"When HiveServer2 Active/Passive High Availability is enabled, uses this namespace for registering HS2\n" +
"instances with zookeeper"),
HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE("hive.server2.tez.interactive.queue", "",
"A single YARN queues to use for Hive Interactive sessions. When this is specified,\n" +
"workload management is enabled and used for these sessions."),
HIVE_SERVER2_WM_NAMESPACE("hive.server2.wm.namespace", "default",
"The WM namespace to use when one metastore is used by multiple compute clusters each \n" +
"with their own workload management. The special value 'default' (the default) will \n" +
"also include any resource plans created before the namespaces were introduced."),
HIVE_SERVER2_WM_WORKER_THREADS("hive.server2.wm.worker.threads", 4,
"Number of worker threads to use to perform the synchronous operations with Tez\n" +
"sessions for workload management (e.g. opening, closing, etc.)"),
HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC("hive.server2.wm.allow.any.pool.via.jdbc", false,
"Applies when a user specifies a target WM pool in the JDBC connection string. If\n" +
"false, the user can only specify a pool he is mapped to (e.g. make a choice among\n" +
"multiple group mappings); if true, the user can specify any existing pool."),
HIVE_SERVER2_WM_POOL_METRICS("hive.server2.wm.pool.metrics", true,
"Whether per-pool WM metrics should be enabled."),
HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT("hive.server2.tez.wm.am.registry.timeout", "30s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for AM registry registration, after which (on attempting to use the\n" +
"session), we kill it and try to get another one."),
HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", "",
"A list of comma separated values corresponding to YARN queues of the same name.\n" +
"When HiveServer2 is launched in Tez mode, this configuration needs to be set\n" +
"for multiple Tez sessions to run in parallel on the cluster."),
HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE("hive.server2.tez.sessions.per.default.queue", 1,
"A positive integer that determines the number of Tez sessions that should be\n" +
"launched on each of the queues specified by \"hive.server2.tez.default.queues\".\n" +
"Determines the parallelism on each queue."),
HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions",
false,
"This flag is used in HiveServer2 to enable a user to use HiveServer2 without\n" +
"turning on Tez for HiveServer2. The user could potentially want to run queries\n" +
"over Tez without the pool of sessions."),
HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK("hive.server2.tez.queue.access.check", false,
"Whether to check user access to explicitly specified YARN queues. " +
"yarn.resourcemanager.webapp.address must be configured to use this."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME("hive.server2.tez.session.lifetime", "162h",
new TimeValidator(TimeUnit.HOURS),
"The lifetime of the Tez sessions launched by HS2 when default sessions are enabled.\n" +
"Set to 0 to disable session expiration."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER("hive.server2.tez.session.lifetime.jitter", "3h",
new TimeValidator(TimeUnit.HOURS),
"The jitter for Tez session lifetime; prevents all the sessions from restarting at once."),
HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS("hive.server2.tez.sessions.init.threads", 16,
"If hive.server2.tez.initialize.default.sessions is enabled, the maximum number of\n" +
"threads to use to initialize the default sessions."),
HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS("hive.server2.tez.sessions.restricted.configs", "",
"The configuration settings that cannot be set when submitting jobs to HiveServer2. If\n" +
"any of these are set to values different from those in the server configuration, an\n" +
"exception will be thrown."),
HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED("hive.server2.tez.sessions.custom.queue.allowed",
"true", new StringSet("true", "false", "ignore"),
"Whether Tez session pool should allow submitting queries to custom queues. The options\n" +
"are true, false (error out), ignore (accept the query but ignore the queue setting)."),
// Operation log configuration
HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true,
"When true, HS2 will save operation logs and make them available for clients"),
HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator +
"operation_logs",
"Top level directory where operation logs are stored if logging functionality is enabled"),
HIVE_SERVER2_LOGGING_OPERATION_LEVEL("hive.server2.logging.operation.level", "EXECUTION",
new StringSet("NONE", "EXECUTION", "PERFORMANCE", "VERBOSE"),
"HS2 operation logging mode available to clients to be set at session level.\n" +
"For this to work, hive.server2.logging.operation.enabled should be set to true.\n" +
" NONE: Ignore any logging\n" +
" EXECUTION: Log completion of tasks\n" +
" PERFORMANCE: Execution + Performance logs \n" +
" VERBOSE: All logs" ),
HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY("hive.server2.operation.log.cleanup.delay", "300s",
new TimeValidator(TimeUnit.SECONDS), "When a query is cancelled (via kill query, query timeout or triggers),\n" +
" operation logs gets cleaned up after this delay"),
// HS2 connections guard rails
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER("hive.server2.limit.connections.per.user", 0,
"Maximum hive server2 connections per user. Any user exceeding this limit will not be allowed to connect. " +
"Default=0 does not enforce limits."),
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS("hive.server2.limit.connections.per.ipaddress", 0,
"Maximum hive server2 connections per ipaddress. Any ipaddress exceeding this limit will not be allowed " +
"to connect. Default=0 does not enforce limits."),
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS("hive.server2.limit.connections.per.user.ipaddress", 0,
"Maximum hive server2 connections per user:ipaddress combination. Any user-ipaddress exceeding this limit will " +
"not be allowed to connect. Default=0 does not enforce limits."),
// Enable metric collection for HiveServer2
HIVE_SERVER2_METRICS_ENABLED("hive.server2.metrics.enabled", false, "Enable metrics on the HiveServer2."),
// http (over thrift) transport settings
HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'."),
HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice",
"Path component of URL endpoint when in HTTP mode."),
HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE("hive.server2.thrift.max.message.size", 100*1024*1024,
"Maximum message size in bytes a HS2 server will accept."),
HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", "1800s",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum idle time for a connection on the server when in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE("hive.server2.thrift.http.request.header.size", 6*1024,
"Request header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE("hive.server2.thrift.http.response.header.size", 6*1024,
"Response header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED("hive.server2.thrift.http.compression.enabled", true,
"Enable thrift http compression via Jetty compression support"),
// Cookie based authentication when using HTTP Transport
HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED("hive.server2.thrift.http.cookie.auth.enabled", true,
"When true, HiveServer2 in HTTP transport mode, will use cookie based authentication mechanism."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE("hive.server2.thrift.http.cookie.max.age", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum age in seconds for server side cookie used by HS2 in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN("hive.server2.thrift.http.cookie.domain", null,
"Domain for the HS2 generated cookies"),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null,
"Path for the HS2 generated cookies"),
@Deprecated
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true,
"Deprecated: Secure attribute of the HS2 generated cookie (this is automatically enabled for SSL enabled HiveServer2)."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true,
"HttpOnly attribute of the HS2 generated cookie."),
// binary transport settings
HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'."),
HIVE_SERVER2_THRIFT_SASL_QOP("hive.server2.thrift.sasl.qop", "auth",
new StringSet("auth", "auth-int", "auth-conf"),
"Sasl QOP value; set it to one of following values to enable higher levels of\n" +
"protection for HiveServer2 communication with clients.\n" +
"Setting hadoop.rpc.protection to a higher level than HiveServer2 does not\n" +
"make sense in most situations. HiveServer2 ignores hadoop.rpc.protection in favor\n" +
"of hive.server2.thrift.sasl.qop.\n" +
" \"auth\" - authentication only (default)\n" +
" \"auth-int\" - authentication plus integrity protection\n" +
" \"auth-conf\" - authentication plus integrity and confidentiality protection\n" +
"This is applicable only if HiveServer2 is configured to use Kerberos authentication."),
HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS("hive.server2.thrift.min.worker.threads", 5,
"Minimum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500,
"Maximum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH(
"hive.server2.thrift.exponential.backoff.slot.length", "100ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Binary exponential backoff slot time for Thrift clients during login to HiveServer2,\n" +
"for retries until hitting Thrift client timeout"),
HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT("hive.server2.thrift.login.timeout", "20s",
new TimeValidator(TimeUnit.SECONDS), "Timeout for Thrift clients during login to HiveServer2"),
HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
// Configuration for async thread pool in SessionManager
HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100,
"Number of threads in the async thread pool for HiveServer2"),
HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"How long HiveServer2 shutdown will wait for async threads to terminate."),
HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100,
"Size of the wait queue for async thread pool in HiveServer2.\n" +
"After hitting this limit, the async thread pool will reject new requests."),
HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task\n" +
"to arrive before terminating"),
HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE("hive.server2.async.exec.async.compile", false,
"Whether to enable compiling async query asynchronously. If enabled, it is unknown if the query will have any resultset before compilation completed."),
HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"),
HIVE_SESSION_IMPL_CLASSNAME("hive.session.impl.classname", null, "Classname for custom implementation of hive session"),
HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME("hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI"),
// HiveServer2 auth configuration
HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
new StringSet("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM"),
"Client authentication types.\n" +
" NONE: no authentication check\n" +
" LDAP: LDAP/AD based authentication\n" +
" KERBEROS: Kerberos/GSSAPI authentication\n" +
" CUSTOM: Custom authentication provider\n" +
" (Use with property hive.server2.custom.authentication.class)\n" +
" PAM: Pluggable authentication module\n" +
" NOSASL: Raw transport"),
HIVE_SERVER2_ALLOW_USER_SUBSTITUTION("hive.server2.allow.user.substitution", true,
"Allow alternate user to be specified as part of HiveServer2 open connection request."),
HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", "",
"Kerberos keytab file for server principal"),
HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", "",
"Kerberos server principal"),
HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL("hive.server2.authentication.client.kerberos.principal", "",
"Kerberos principal used by the HA hive_server2s."),
HIVE_SERVER2_SPNEGO_KEYTAB("hive.server2.authentication.spnego.keytab", "",
"keytab file for SPNego principal, optional,\n" +
"typical value would look like /etc/security/keytabs/spnego.service.keytab,\n" +
"This keytab would be used by HiveServer2 when Kerberos security is enabled and \n" +
"HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication.\n" +
"SPNego authentication would be honored only if valid\n" +
" hive.server2.authentication.spnego.principal\n" +
"and\n" +
" hive.server2.authentication.spnego.keytab\n" +
"are specified."),
HIVE_SERVER2_SPNEGO_PRINCIPAL("hive.server2.authentication.spnego.principal", "",
"SPNego service principal, optional,\n" +
"typical value would look like HTTP/[email protected]\n" +
"SPNego service principal would be used by HiveServer2 when Kerberos security is enabled\n" +
"and HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication."),
HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null,
"LDAP connection URL(s),\n" +
"this value could contain URLs to multiple LDAP servers instances for HA,\n" +
"each LDAP URL is separated by a SPACE character. URLs are used in the \n" +
" order specified until a connection is successful."),
HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null, "LDAP base DN"),
HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null, ""),
HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN("hive.server2.authentication.ldap.groupDNPattern", null,
"COLON-separated list of patterns to use to find DNs for group entities in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER("hive.server2.authentication.ldap.groupFilter", null,
"COMMA-separated list of LDAP Group names (short name not full DNs).\n" +
"For example: HiveAdmins,HadoopAdmins,Administrators"),
HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN("hive.server2.authentication.ldap.userDNPattern", null,
"COLON-separated list of patterns to use to find DNs for users in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter", null,
"COMMA-separated list of LDAP usernames (just short names, not full DNs).\n" +
"For example: hiveuser,impalauser,hiveadmin,hadoopadmin"),
HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", "uid",
"LDAP attribute name whose values are unique in this LDAP server.\n" +
"For example: uid or CN."),
HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey", "member",
"LDAP attribute name on the group object that contains the list of distinguished names\n" +
"for the user, group, and contact objects that are members of the group.\n" +
"For example: member, uniqueMember or memberUid"),
HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY(HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME, null,
"LDAP attribute name on the user object that contains groups of which the user is\n" +
"a direct member, except for the primary group, which is represented by the\n" +
"primaryGroupId.\n" +
"For example: memberOf"),
HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey", "groupOfNames",
"LDAP attribute name on the group entry that is to be used in LDAP group searches.\n" +
"For example: group, groupOfNames or groupOfUniqueNames."),
HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery", null,
"A full LDAP query that LDAP Atn provider uses to execute against LDAP Server.\n" +
"If this query returns a null resultset, the LDAP Provider fails the Authentication\n" +
"request, succeeds if the user is part of the resultset." +
"For example: (&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)) \n" +
"(&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)" +
"(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com))))"),
HIVE_SERVER2_PLAIN_LDAP_BIND_USER("hive.server2.authentication.ldap.binddn", null,
"The user with which to bind to the LDAP server, and search for the full domain name " +
"of the user being authenticated.\n" +
"This should be the full domain name of the user, and should have search access across all " +
"users in the LDAP tree.\n" +
"If not specified, then the user being authenticated will be used as the bind user.\n" +
"For example: CN=bindUser,CN=Users,DC=subdomain,DC=domain,DC=com"),
HIVE_SERVER2_PLAIN_LDAP_BIND_PASSWORD("hive.server2.authentication.ldap.bindpw", null,
"The password for the bind user, to be used to search for the full name of the user being authenticated.\n" +
"If the username is specified, this parameter must also be specified."),
HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null,
"Custom authentication class. Used when property\n" +
"'hive.server2.authentication' is set to 'CUSTOM'. Provided class\n" +
"must be a proper implementation of the interface\n" +
"org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\n" +
"will call its Authenticate(user, passed) method to authenticate requests.\n" +
"The implementation may optionally implement Hadoop's\n" +
"org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object."),
HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null,
"List of the underlying pam services that should be used when auth type is PAM\n" +
"A file with the same name must exist in /etc/pam.d"),
HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true,
"Setting this property to true will have HiveServer2 execute\n" +
"Hive operations as the user making the calls to it."),
HIVE_DISTCP_DOAS_USER("hive.distcp.privileged.doAs","hive",
"This property allows privileged distcp executions done by hive\n" +
"to run as this user."),
HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC", new StringSet("CLASSIC", "HIVE"),
"This setting reflects how HiveServer2 will report the table types for JDBC and other\n" +
"client implementations that retrieve the available tables and supported table types\n" +
" HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW\n" +
" CLASSIC : More generic types like TABLE and VIEW"),
HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", "", ""),
// SSL settings
HIVE_SERVER2_USE_SSL("hive.server2.use.SSL", false,
"Set this to true for using SSL encryption in HiveServer2."),
HIVE_SERVER2_SSL_KEYSTORE_PATH("hive.server2.keystore.path", "",
"SSL certificate keystore location."),
HIVE_SERVER2_SSL_KEYSTORE_PASSWORD("hive.server2.keystore.password", "",
"SSL certificate keystore password."),
HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE("hive.server2.map.fair.scheduler.queue", true,
"If the YARN fair scheduler is configured and HiveServer2 is running in non-impersonation mode,\n" +
"this setting determines the user for fair scheduler queue mapping.\n" +
"If set to true (default), the logged-in user determines the fair scheduler queue\n" +
"for submitted jobs, so that map reduce resource usage can be tracked by user.\n" +
"If set to false, all Hive jobs go to the 'hive' user's queue."),
HIVE_SERVER2_BUILTIN_UDF_WHITELIST("hive.server2.builtin.udf.whitelist", "",
"Comma separated list of builtin udf names allowed in queries.\n" +
"An empty whitelist allows all builtin udfs to be executed. " +
" The udf black list takes precedence over udf white list"),
HIVE_SERVER2_BUILTIN_UDF_BLACKLIST("hive.server2.builtin.udf.blacklist", "",
"Comma separated list of udfs names. These udfs will not be allowed in queries." +
" The udf black list takes precedence over udf white list"),
HIVE_ALLOW_UDF_LOAD_ON_DEMAND("hive.allow.udf.load.on.demand", false,
"Whether enable loading UDFs from metastore on demand; this is mostly relevant for\n" +
"HS2 and was the default behavior before Hive 1.2. Off by default."),
HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h",
new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
"The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT("hive.server2.close.session.on.disconnect", true,
"Session will be closed when connection is closed. Set this to false to have session outlive its parent connection."),
HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "5d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" +
" With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" +
" With negative value, it's checked for all of the operations regardless of state."),
HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION("hive.server2.idle.session.check.operation", true,
"Session will be considered to be idle only if there is no activity, and there is no pending operation.\n" +
" This setting takes effect only if session idle timeout (hive.server2.idle.session.timeout) and checking\n" +
"(hive.server2.session.check.interval) are enabled."),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT("hive.server2.thrift.client.retry.limit", 1,"Number of retries upon " +
"failure of Thrift HiveServer2 calls"),
HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT("hive.server2.thrift.client.connect.retry.limit", 1,"Number of " +
"retries while opening a connection to HiveServe2"),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS("hive.server2.thrift.client.retry.delay.seconds", "1s",
new TimeValidator(TimeUnit.SECONDS), "Number of seconds for the HiveServer2 thrift client to wait between " +
"consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures"),
HIVE_SERVER2_THRIFT_CLIENT_USER("hive.server2.thrift.client.user", "anonymous","Username to use against thrift" +
" client"),
HIVE_SERVER2_THRIFT_CLIENT_PASSWORD("hive.server2.thrift.client.password", "anonymous","Password to use against " +
"thrift client"),
// ResultSet serialization settings
HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS("hive.server2.thrift.resultset.serialize.in.tasks", false,
"Whether we should serialize the Thrift structures used in JDBC ResultSet RPC in task nodes.\n " +
"We use SequenceFile and ThriftJDBCBinarySerDe to read and write the final results if this is true."),
// TODO: Make use of this config to configure fetch size
HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE("hive.server2.thrift.resultset.max.fetch.size",
10000, "Max number of rows sent in one Fetch RPC call by the server to the client."),
HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE("hive.server2.thrift.resultset.default.fetch.size", 1000,
"The number of rows sent in one Fetch RPC call by the server to the client, if not\n" +
"specified by the client."),
HIVE_SERVER2_XSRF_FILTER_ENABLED("hive.server2.xsrf.filter.enabled",false,
"If enabled, HiveServer2 will block any requests made to it over http " +
"if an X-XSRF-HEADER header is not present"),
HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist",
"set,reset,dfs,add,list,delete,reload,compile,llap",
"Comma separated list of non-SQL Hive commands users are authorized to execute"),
HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH("hive.server2.job.credential.provider.path", "",
"If set, this configuration property should provide a comma-separated list of URLs that indicates the type and " +
"location of providers to be used by hadoop credential provider API. It provides HiveServer2 the ability to provide job-specific " +
"credential providers for jobs run using MR and Spark execution engines. This functionality has not been tested against Tez."),
HIVE_MOVE_FILES_THREAD_COUNT("hive.mv.files.thread", 15, new SizeValidator(0L, true, 1024L, true), "Number of threads"
+ " used to move files in move task. Set it to 0 to disable multi-threaded file moves. This parameter is also used by"
+ " MSCK to check tables."),
HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT("hive.load.dynamic.partitions.thread", 15,
new SizeValidator(1L, true, 1024L, true),
"Number of threads used to load dynamic partitions."),
// If this is set all move tasks at the end of a multi-insert query will only begin once all
// outputs are ready
HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
"hive.multi.insert.move.tasks.share.dependencies", false,
"If this is set all move tasks for tables/partitions (not directories) at the end of a\n" +
"multi-insert query will only begin once the dependencies for all these move tasks have been\n" +
"met.\n" +
"Advantages: If concurrency is enabled, the locks will only be released once the query has\n" +
" finished, so with this config enabled, the time when the table/partition is\n" +
" generated will be much closer to when the lock on it is released.\n" +
"Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which\n" +
" are produced by this query and finish earlier will be available for querying\n" +
" much earlier. Since the locks are only released once the query finishes, this\n" +
" does not apply if concurrency is enabled."),
HIVE_INFER_BUCKET_SORT("hive.exec.infer.bucket.sort", false,
"If this is set, when writing partitions, the metadata will include the bucketing/sorting\n" +
"properties with which the data was written if any (this will not overwrite the metadata\n" +
"inherited from the table if the table is bucketed/sorted)"),
HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO(
"hive.exec.infer.bucket.sort.num.buckets.power.two", false,
"If this is set, when setting the number of reducers for the map reduce task which writes the\n" +
"final output files, it will choose a number which is a power of two, unless the user specifies\n" +
"the number of reducers to use using mapred.reduce.tasks. The number of reducers\n" +
"may be set to a power of two, only to be followed by a merge task meaning preventing\n" +
"anything from being inferred.\n" +
"With hive.exec.infer.bucket.sort set to true:\n" +
"Advantages: If this is not set, the number of buckets for partitions will seem arbitrary,\n" +
" which means that the number of mappers used for optimized joins, for example, will\n" +
" be very low. With this set, since the number of buckets used for any partition is\n" +
" a power of two, the number of mappers used for optimized joins will be the least\n" +
" number of buckets used by any partition being joined.\n" +
"Disadvantages: This may mean a much larger or much smaller number of reducers being used in the\n" +
" final map reduce job, e.g. if a job was originally going to take 257 reducers,\n" +
" it will now take 512 reducers, similarly if the max number of reducers is 511,\n" +
" and a job was going to use this many, it will now use 256 reducers."),
HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false,
"Enable list bucketing optimizer. Default value is false so that we disable it by default."),
    // Allow TCP Keep alive socket option for HiveServer or a maximum timeout for the socket.
SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds."),
SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections."),
HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false,
"Whether to show the unquoted partition names in query results."),
HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr", new StringSet(true, "mr", "tez", "spark"),
"Chooses execution engine. Options are: mr (Map reduce, default), tez, spark. While MR\n" +
"remains the default engine for historical reasons, it is itself a historical engine\n" +
"and is deprecated in Hive 2 line. It may be removed without further warning."),
HIVE_EXECUTION_MODE("hive.execution.mode", "container", new StringSet("container", "llap"),
"Chooses whether query fragments will run in container or in llap"),
HIVE_JAR_DIRECTORY("hive.jar.directory", null,
"This is the location hive in tez mode will look for to find a site wide \n" +
"installed hive instance."),
HIVE_USER_INSTALL_DIR("hive.user.install.directory", "/user/",
"If hive (in tez mode only) cannot find a usable hive jar in \"hive.jar.directory\", \n" +
"it will upload the hive jar to \"hive.user.install.directory/user.name\"\n" +
"and use it to run queries."),
// Vectorization enabled
HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", true,
"This flag should be set to true to enable vectorized mode of query execution.\n" +
"The default value is true to reflect that our most expected Hive deployment will be using vectorization."),
HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED("hive.vectorized.execution.mapjoin.native.enabled", true,
"This flag should be set to true to enable native (i.e. non-pass through) vectorization\n" +
"of queries using MapJoin.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED("hive.vectorized.execution.mapjoin.native.multikey.only.enabled", false,
"This flag should be set to true to restrict use of native vector map join hash tables to\n" +
"the MultiKey in queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED("hive.vectorized.execution.mapjoin.minmax.enabled", false,
"This flag should be set to true to enable vector map join hash tables to\n" +
"use max / max filtering for integer join queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD("hive.vectorized.execution.mapjoin.overflow.repeated.threshold", -1,
"The number of small table rows for a match in vector map join hash tables\n" +
"where we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\n" +
"A value of -1 means do use the join result optimization. Otherwise, threshold value can be 0 to maximum integer."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED("hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", false,
"This flag should be set to true to enable use of native fast vector map join hash tables in\n" +
"queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000,
"Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."),
HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000,
"Max number of entries in the vector group by aggregation hashtables. \n" +
"Exceeding this will trigger a flush irrelevant of memory pressure condition."),
HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
"Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true,
"This flag should be set to true to enable the new vectorization\n" +
"of queries using ReduceSink.\ni" +
"The default value is true."),
HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT("hive.vectorized.use.vectorized.input.format", true,
"This flag should be set to true to enable vectorizing with vectorized input file format capable SerDe.\n" +
"The default value is true."),
HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES("hive.vectorized.input.format.excludes","",
"This configuration should be set to fully described input format class names for which \n"
+ " vectorized input format should not be used for vectorized execution."),
HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE("hive.vectorized.use.vector.serde.deserialize", true,
"This flag should be set to true to enable vectorizing rows using vector deserialize.\n" +
"The default value is true."),
HIVE_VECTORIZATION_USE_ROW_DESERIALIZE("hive.vectorized.use.row.serde.deserialize", true,
"This flag should be set to true to enable vectorizing using row deserialize.\n" +
"The default value is false."),
HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES(
"hive.vectorized.row.serde.inputformat.excludes",
"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"The input formats not supported by row deserialize vectorization."),
HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all", new StringSet("none", "chosen", "all"),
"Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n" +
"0. none : disable any usage of VectorUDFAdaptor\n" +
"1. chosen : use VectorUDFAdaptor for a small set of UDFs that were chosen for good performance\n" +
"2. all : use VectorUDFAdaptor for all UDFs"
),
HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", false,
"internal use only, used to force always using the VectorUDFAdaptor.\n" +
"The default is false, of course",
true),
HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", true,
"This flag should be set to true to enable vectorized mode of the PTF of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT("hive.vectorized.ptf.max.memory.buffering.batch.count", 25,
"Maximum number of vectorized row batches to buffer in memory for PTF\n" +
"The default value is 25"),
HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE("hive.vectorized.testing.reducer.batch.size", -1,
"internal use only, used for creating small group key vectorized row batches to exercise more logic\n" +
"The default value is -1 which means don't restrict for testing",
true),
HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS("hive.vectorized.reuse.scratch.columns", true,
"internal use only. Disable this to debug scratch column state issues",
true),
HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED("hive.vectorized.complex.types.enabled", true,
"This flag should be set to true to enable vectorization\n" +
"of expressions with complex types.\n" +
"The default value is true."),
HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED("hive.vectorized.groupby.complex.types.enabled", true,
"This flag should be set to true to enable group by vectorization\n" +
"of aggregations that use complex types.\n",
"For example, AVG uses a complex type (STRUCT) for partial aggregation results" +
"The default value is true."),
HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED("hive.vectorized.row.identifier.enabled", true,
"This flag should be set to true to enable vectorization of ROW__ID."),
HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS("hive.vectorized.use.checked.expressions", false,
"This flag should be set to true to use overflow checked vector expressions when available.\n" +
"For example, arithmetic expressions which can overflow the output data type can be evaluated using\n" +
" checked vector expressions so that they produce same result as non-vectorized evaluation."),
HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS(
"hive.vectorized.adaptor.suppress.evaluate.exceptions", false,
"This flag should be set to true to suppress HiveException from the generic UDF function\n" +
"evaluate call and turn them into NULLs. Assume, by default, this is not needed"),
HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED(
"hive.vectorized.input.format.supports.enabled",
"decimal_64",
"Which vectorized input format support features are enabled for vectorization.\n" +
"That is, if a VectorizedInputFormat input format does support \"decimal_64\" for example\n" +
"this variable must enable that to be used in vectorization"),
HIVE_VECTORIZED_IF_EXPR_MODE("hive.vectorized.if.expr.mode", "better", new StringSet("adaptor", "good", "better"),
"Specifies the extent to which SQL IF statements will be vectorized.\n" +
"0. adaptor: only use the VectorUDFAdaptor to vectorize IF statements\n" +
"1. good : use regular vectorized IF expression classes that get good performance\n" +
"2. better : use vectorized IF expression classes that conditionally execute THEN/ELSE\n" +
" expressions for better performance.\n"),
HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE("hive.test.vectorized.execution.enabled.override",
"none", new StringSet("none", "enable", "disable"),
"internal use only, used to override the hive.vectorized.execution.enabled setting and\n" +
"turn off vectorization. The default is false, of course",
true),
HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE(
"hive.test.vectorization.suppress.explain.execution.mode", false,
"internal use only, used to suppress \"Execution mode: vectorized\" EXPLAIN display.\n" +
"The default is false, of course",
true),
HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS(
"hive.test.vectorizer.suppress.fatal.exceptions", true,
"internal use only. When false, don't suppress fatal exceptions like\n" +
"NullPointerException, etc so the query will fail and assure it will be noticed",
true),
HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED(
"hive.vectorized.execution.filesink.arrow.native.enabled", false,
"This flag should be set to true to enable the native vectorization\n" +
"of queries using the Arrow SerDe and FileSink.\n" +
"The default value is false."),
HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control "
+ "whether to check, convert, and normalize partition value to conform to its column type in "
+ "partition operations including but not limited to insert, such as alter, describe etc."),
HIVE_HADOOP_CLASSPATH("hive.hadoop.classpath", null,
"For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer2 \n" +
"using \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\"."),
HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false,
"Whether to send the query plan via local resource or RPC"),
HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true,
"Whether to generate the splits locally or in the AM (tez only)"),
HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true,
"Whether to generate consistent split locations when generating splits in the AM"),
HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_SPARK_TIMEOUT("hive.prewarm.spark.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait to finish prewarming spark executors"),
HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""),
HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""),
HIVECOUNTERGROUP("hive.counters.group.name", "HIVE",
"The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"),
HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column",
new StringSet("none", "column"),
"Whether to use quoted identifier. 'none' or 'column' can be used. \n" +
" none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
" column: implies column names can contain any character."
),
/**
* @deprecated Use MetastoreConf.SUPPORT_SPECIAL_CHARACTERS_IN_TABLE_NAMES
*/
@Deprecated
HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true,
"This flag should be set to true to enable support for special characters in table names.\n"
+ "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+ "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+ "The default value is true."),
HIVE_CREATE_TABLES_AS_INSERT_ONLY("hive.create.as.insert.only", false,
"Whether the eligible tables should be created as ACID insert-only by default. Does \n" +
"not apply to external tables, the ones using storage handlers, etc."),
// role names are case-insensitive
USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
"Comma separated list of users who are in admin role for bootstrapping.\n" +
"More users can be added in ADMIN role later."),
HIVE_COMPAT("hive.compat", HiveCompat.DEFAULT_COMPAT_LEVEL,
"Enable (configurable) deprecated behaviors by setting desired level of backward compatibility.\n" +
"Setting to 0.12:\n" +
" Maintains division behavior: int / int = double"),
HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ("hive.convert.join.bucket.mapjoin.tez", true,
"Whether joins can be automatically converted to bucket map joins in hive \n" +
"when tez is used as the execution engine."),
HIVE_TEZ_BMJ_USE_SUBCACHE("hive.tez.bmj.use.subcache", true,
"Use subcache to reuse hashtable across multiple tasks"),
HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true,
"Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console."),
HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait for another thread to localize the same resource for hive-tez."),
HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5,
"The number of attempts waiting for localizing a resource in hive-tez."),
TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false,
"Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\n" +
"and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as\n" +
"necessary."),
TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR("hive.tez.llap.min.reducer.per.executor", 0.33f,
"If above 0, the min number of reducers for auto-parallelism for LLAP scheduling will\n" +
"be set to this fraction of the number of executors."),
TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f,
"When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges."),
TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f,
"When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number\n" +
"of reducers that tez specifies."),
TEZ_OPTIMIZE_BUCKET_PRUNING(
"hive.tez.bucket.pruning", false,
"When pruning is enabled, filters on bucket columns will be processed by \n" +
"filtering the splits against a bitset of included buckets. This needs predicates \n"+
"produced by hive.optimize.ppd and hive.optimize.index.filters."),
TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
"hive.tez.bucket.pruning.compat", true,
"When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" +
"This occasionally doubles the data scan cost, but is default enabled for safety"),
TEZ_DYNAMIC_PARTITION_PRUNING(
"hive.tez.dynamic.partition.pruning", true,
"When dynamic pruning is enabled, joins on partition keys will be processed by sending\n" +
"events from the processing vertices to the Tez application master. These events will be\n" +
"used to prune unnecessary partitions."),
TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED("hive.tez.dynamic.partition.pruning.extended", true,
"Whether we should try to create additional opportunities for dynamic pruning, e.g., considering\n" +
"siblings that may not be created by normal dynamic pruning logic.\n" +
"Only works when dynamic pruning is enabled."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE("hive.tez.dynamic.partition.pruning.max.event.size", 1*1024*1024L,
"Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE("hive.tez.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size of events in dynamic pruning."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION("hive.tez.dynamic.semijoin.reduction", true,
"When dynamic semijoin is enabled, shuffle joins will perform a leaky semijoin before shuffle. This " +
"requires hive.tez.dynamic.partition.pruning to be enabled."),
TEZ_MIN_BLOOM_FILTER_ENTRIES("hive.tez.min.bloom.filter.entries", 1000000L,
"Bloom filter should be of at min certain size to be effective"),
TEZ_MAX_BLOOM_FILTER_ENTRIES("hive.tez.max.bloom.filter.entries", 100000000L,
"Bloom filter should be of at max certain size to be effective"),
TEZ_BLOOM_FILTER_FACTOR("hive.tez.bloom.filter.factor", (float) 1.0,
"Bloom filter should be a multiple of this factor with nDV"),
TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION("hive.tez.bigtable.minsize.semijoin.reduction", 100000000L,
"Big table for runtime filteting should be of atleast this size"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD("hive.tez.dynamic.semijoin.reduction.threshold", (float) 0.50,
"Only perform semijoin optimization if the estimated benefit at or above this fraction of the target table"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN("hive.tez.dynamic.semijoin.reduction.for.mapjoin", false,
"Use a semi-join branch for map-joins. This may not make it faster, but is helpful in certain join patterns."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR("hive.tez.dynamic.semijoin.reduction.for.dpp.factor",
(float) 1.0,
"The factor to decide if semijoin branch feeds into a TableScan\n" +
"which has an outgoing Dynamic Partition Pruning (DPP) branch based on number of distinct values."),
TEZ_SMB_NUMBER_WAVES(
"hive.tez.smb.number.waves",
(float) 0.5,
"The number of waves in which to run the SMB join. Account for cluster being occupied. Ideally should be 1 wave."),
TEZ_EXEC_SUMMARY(
"hive.tez.exec.print.summary",
false,
"Display breakdown of execution steps, for every query executed by the shell."),
TEZ_SESSION_EVENTS_SUMMARY(
"hive.tez.session.events.print.summary",
"none", new StringSet("none", "text", "json"),
"Display summary of all tez sessions related events in text or json format"),
TEZ_EXEC_INPLACE_PROGRESS(
"hive.tez.exec.inplace.progress",
true,
"Updates tez job execution progress in-place in the terminal when hive-cli is used."),
HIVE_SERVER2_INPLACE_PROGRESS(
"hive.server2.in.place.progress",
true,
"Allows hive server 2 to send progress bar update information. This is currently available"
+ " only if the execution engine is tez or Spark."),
TEZ_DAG_STATUS_CHECK_INTERVAL("hive.tez.dag.status.check.interval", "500ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Interval between subsequent DAG status invocation."),
SPARK_EXEC_INPLACE_PROGRESS("hive.spark.exec.inplace.progress", true,
"Updates spark job execution progress in-place in the terminal."),
TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION("hive.tez.container.max.java.heap.fraction", 0.8f,
"This is to override the tez setting with the same name"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN("hive.tez.task.scale.memory.reserve-fraction.min",
0.3f, "This is to override the tez setting tez.task.scale.memory.reserve-fraction"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX("hive.tez.task.scale.memory.reserve.fraction.max",
0.5f, "The maximum fraction of JVM memory which Tez will reserve for the processor"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION("hive.tez.task.scale.memory.reserve.fraction",
-1f, "The customized fraction of JVM memory which Tez will reserve for the processor"),
TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED("hive.tez.cartesian-product.enabled",
false, "Use Tez cartesian product edge to speed up cross product"),
TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB("hive.tez.unordered.output.buffer.size.mb", -1,
"When we have an operation that does not need a large buffer, we use this buffer size for simple custom edge.\n" +
"Value is an integer. Default value is -1, which means that we will estimate this value from operators in the plan."),
// The default is different on the client and server, so it's null here.
LLAP_IO_ENABLED("hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled."),
LLAP_IO_ROW_WRAPPER_ENABLED("hive.llap.io.row.wrapper.enabled", true, "Whether the LLAP IO row wrapper is enabled for non-vectorized queries."),
LLAP_IO_ACID_ENABLED("hive.llap.io.acid", true, "Whether the LLAP IO layer is enabled for ACID."),
LLAP_IO_TRACE_SIZE("hive.llap.io.trace.size", "2Mb",
new SizeValidator(0L, true, (long)Integer.MAX_VALUE, false),
"The buffer size for a per-fragment LLAP debug trace. 0 to disable."),
LLAP_IO_TRACE_ALWAYS_DUMP("hive.llap.io.trace.always.dump", false,
"Whether to always dump the LLAP IO trace (if enabled); the default is on error."),
LLAP_IO_NONVECTOR_WRAPPER_ENABLED("hive.llap.io.nonvector.wrapper.enabled", true,
"Whether the LLAP IO layer is enabled for non-vectorized queries that read inputs\n" +
"that can be vectorized"),
LLAP_IO_MEMORY_MODE("hive.llap.io.memory.mode", "cache",
new StringSet("cache", "none"),
"LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" +
"custom off-heap allocator, 'none' doesn't use either (this mode may result in\n" +
"significant performance degradation)"),
LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "4Kb", new SizeValidator(),
"Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" +
"padded to minimum allocation. For ORC, should generally be the same as the expected\n" +
"compression buffer size, or next lowest power of 2. Must be a power of 2."),
LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(),
"Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" +
"the largest expected ORC compression buffer size. Must be a power of 2."),
LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8,
"Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
"(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
"not the case, an adjusted size will be used. Using powers of 2 is recommended."),
LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", "1Gb", new SizeValidator(),
"Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"),
LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true,
"Whether ORC low-level cache should use direct allocation."),
LLAP_ALLOCATOR_PREALLOCATE("hive.llap.io.allocator.preallocate", true,
"Whether to preallocate the entire IO memory at init time."),
LLAP_ALLOCATOR_MAPPED("hive.llap.io.allocator.mmap", false,
"Whether ORC low-level cache should use memory mapped allocation (direct I/O). \n" +
"This is recommended to be used along-side NVDIMM (DAX) or NVMe flash storage."),
LLAP_ALLOCATOR_MAPPED_PATH("hive.llap.io.allocator.mmap.path", "/tmp",
new WritableDirectoryValidator(),
"The directory location for mapping NVDIMM/NVMe flash storage into the ORC low-level cache."),
LLAP_ALLOCATOR_DISCARD_METHOD("hive.llap.io.allocator.discard.method", "both",
new StringSet("freelist", "brute", "both"),
"Which method to use to force-evict blocks to deal with fragmentation:\n" +
"freelist - use half-size free list (discards less, but also less reliable); brute -\n" +
"brute force, discard whatever we can; both - first try free list, then brute force."),
LLAP_ALLOCATOR_DEFRAG_HEADROOM("hive.llap.io.allocator.defrag.headroom", "1Mb",
"How much of a headroom to leave to allow allocator more flexibility to defragment.\n" +
"The allocator would further cap it to a fraction of total memory."),
LLAP_ALLOCATOR_MAX_FORCE_EVICTED("hive.llap.io.allocator.max.force.eviction", "16Mb",
"Fragmentation can lead to some cases where more eviction has to happen to accommodate allocations\n" +
" This configuration puts a limit on how many bytes to force evict before using Allocator Discard method."
+ " Higher values will allow allocator more flexibility and will lead to better caching."),
LLAP_TRACK_CACHE_USAGE("hive.llap.io.track.cache.usage", true,
"Whether to tag LLAP cache contents, mapping them to Hive entities (paths for\n" +
"partitions and tables) for reporting."),
LLAP_USE_LRFU("hive.llap.io.use.lrfu", true,
"Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."),
LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.000001f,
"Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\n" +
"behave like LFU, 1 makes it behave like LRU, values in between balance accordingly.\n" +
"The meaning of this parameter is the inverse of the number of time ticks (cache\n" +
" operations, currently) that cause the combined recency-frequency of a block in cache\n" +
" to be halved."),
LLAP_CACHE_ALLOW_SYNTHETIC_FILEID("hive.llap.cache.allow.synthetic.fileid", true,
"Whether LLAP cache should use synthetic file ID if real one is not available. Systems\n" +
"like HDFS, Isilon, etc. provide a unique file/inode ID. On other FSes (e.g. local\n" +
"FS), the cache would not work by default because LLAP is unable to uniquely track the\n" +
"files; enabling this setting allows LLAP to generate file ID from the path, size and\n" +
"modification time, which is almost certain to identify file uniquely. However, if you\n" +
"use a FS without file IDs and rewrite files a lot (or are paranoid), you might want\n" +
"to avoid this setting."),
LLAP_CACHE_DEFAULT_FS_FILE_ID("hive.llap.cache.defaultfs.only.native.fileid", true,
"Whether LLAP cache should use native file IDs from the default FS only. This is to\n" +
"avoid file ID collisions when several different DFS instances are in use at the same\n" +
"time. Disable this check to allow native file IDs from non-default DFS."),
LLAP_CACHE_ENABLE_ORC_GAP_CACHE("hive.llap.orc.gap.cache", true,
"Whether LLAP cache for ORC should remember gaps in ORC compression buffer read\n" +
"estimates, to avoid re-reading the data that was read once and discarded because it\n" +
"is unneeded. This is only necessary for ORC files written before HIVE-9660."),
LLAP_IO_USE_FILEID_PATH("hive.llap.io.use.fileid.path", true,
"Whether LLAP should use fileId (inode)-based path to ensure better consistency for the\n" +
"cases of file overwrites. This is supported on HDFS. Disabling this also turns off any\n" +
"cache consistency checks based on fileid comparisons."),
// Restricted to text for now as this is a new feature; only text files can be sliced.
LLAP_IO_ENCODE_ENABLED("hive.llap.io.encode.enabled", true,
"Whether LLAP should try to re-encode and cache data for non-ORC formats. This is used\n" +
"on LLAP Server side to determine if the infrastructure for that is initialized."),
LLAP_IO_ENCODE_FORMATS("hive.llap.io.encode.formats",
"org.apache.hadoop.mapred.TextInputFormat,",
"The table input formats for which LLAP IO should re-encode and cache data.\n" +
"Comma-separated list."),
LLAP_IO_ENCODE_ALLOC_SIZE("hive.llap.io.encode.alloc.size", "256Kb", new SizeValidator(),
"Allocation size for the buffers used to cache encoded data from non-ORC files. Must\n" +
"be a power of two between " + LLAP_ALLOCATOR_MIN_ALLOC + " and\n" +
LLAP_ALLOCATOR_MAX_ALLOC + "."),
LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED("hive.llap.io.encode.vector.serde.enabled", true,
"Whether LLAP should use vectorized SerDe reader to read text data when re-encoding."),
LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED("hive.llap.io.encode.vector.serde.async.enabled",
true,
"Whether LLAP should use async mode in vectorized SerDe reader to read text data."),
LLAP_IO_ENCODE_SLICE_ROW_COUNT("hive.llap.io.encode.slice.row.count", 100000,
"Row count to use to separate cache slices when reading encoded data from row-based\n" +
"inputs into LLAP cache, if this feature is enabled."),
LLAP_IO_ENCODE_SLICE_LRR("hive.llap.io.encode.slice.lrr", true,
"Whether to separate cache slices when reading encoded data from text inputs via MR\n" +
"MR LineRecordRedader into LLAP cache, if this feature is enabled. Safety flag."),
LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
"Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
LLAP_IO_VRB_QUEUE_LIMIT_BASE("hive.llap.io.vrb.queue.limit.base", 50000,
"The default queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
"slower than the IO. The actual queue size is set per fragment, and is adjusted down\n" +
"from the base, depending on the schema."),
LLAP_IO_VRB_QUEUE_LIMIT_MIN("hive.llap.io.vrb.queue.limit.min", 10,
"The minimum queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
"slower than the IO (used when determining the size from base size)."),
LLAP_IO_SHARE_OBJECT_POOLS("hive.llap.io.share.object.pools", false,
"Whether to used shared object pools in LLAP IO. A safety flag."),
LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", false,
"Whether or not to allow the planner to run vertices in the AM."),
LLAP_AUTO_ENFORCE_TREE("hive.llap.auto.enforce.tree", true,
"Enforce that all parents are in llap, before considering vertex"),
LLAP_AUTO_ENFORCE_VECTORIZED("hive.llap.auto.enforce.vectorized", true,
"Enforce that inputs are vectorized, before considering vertex"),
LLAP_AUTO_ENFORCE_STATS("hive.llap.auto.enforce.stats", true,
"Enforce that col stats are available, before considering vertex"),
LLAP_AUTO_MAX_INPUT("hive.llap.auto.max.input.size", 10*1024*1024*1024L,
"Check input size, before considering vertex (-1 disables check)"),
LLAP_AUTO_MAX_OUTPUT("hive.llap.auto.max.output.size", 1*1024*1024*1024L,
"Check output size, before considering vertex (-1 disables check)"),
LLAP_SKIP_COMPILE_UDF_CHECK("hive.llap.skip.compile.udf.check", false,
"Whether to skip the compile-time check for non-built-in UDFs when deciding whether to\n" +
"execute tasks in LLAP. Skipping the check allows executing UDFs from pre-localized\n" +
"jars in LLAP; if the jars are not pre-localized, the UDFs will simply fail to load."),
LLAP_ALLOW_PERMANENT_FNS("hive.llap.allow.permanent.fns", true,
"Whether LLAP decider should allow permanent UDFs."),
LLAP_EXECUTION_MODE("hive.llap.execution.mode", "none",
new StringSet("auto", "none", "all", "map", "only"),
"Chooses whether query fragments will run in container or in llap"),
LLAP_IO_ETL_SKIP_FORMAT("hive.llap.io.etl.skip.format", "encode", new StringSet("none", "encode", "all"),
"For ETL queries, determines whether to skip llap io cache. By default, hive.llap.io.encode.enabled " +
"will be set to false which disables LLAP IO for text formats. Setting it to 'all' will disable LLAP IO for all" +
" formats. 'none' will not disable LLAP IO for any formats."),
LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true,
"Cache objects (plans, hashtables, etc) in llap"),
LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS("hive.llap.io.decoding.metrics.percentiles.intervals", "30",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
"for percentile latency metrics on the LLAP daemon IO decoding time.\n" +
"hive.llap.queue.metrics.percentiles.intervals"),
LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10,
"Specify the number of threads to use for low-level IO thread pool."),
LLAP_KERBEROS_PRINCIPAL(HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME, "",
"The name of the LLAP daemon's service principal."),
LLAP_KERBEROS_KEYTAB_FILE("hive.llap.daemon.keytab.file", "",
"The path to the Kerberos Keytab file containing the LLAP daemon's service principal."),
LLAP_WEBUI_SPNEGO_KEYTAB_FILE("hive.llap.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the LLAP WebUI SPNEGO principal.\n" +
"Typical value would look like /etc/security/keytabs/spnego.service.keytab."),
LLAP_WEBUI_SPNEGO_PRINCIPAL("hive.llap.webui.spnego.principal", "",
"The LLAP WebUI SPNEGO service principal. Configured similarly to\n" +
"hive.server2.webui.spnego.principal"),
LLAP_FS_KERBEROS_PRINCIPAL("hive.llap.task.principal", "",
"The name of the principal to use to run tasks. By default, the clients are required\n" +
"to provide tokens to access HDFS/etc."),
LLAP_FS_KERBEROS_KEYTAB_FILE("hive.llap.task.keytab.file", "",
"The path to the Kerberos Keytab file containing the principal to use to run tasks.\n" +
"By default, the clients are required to provide tokens to access HDFS/etc."),
LLAP_ZKSM_ZK_CONNECTION_STRING("hive.llap.zk.sm.connectionString", "",
"ZooKeeper connection string for ZooKeeper SecretManager."),
LLAP_ZKSM_ZK_SESSION_TIMEOUT("hive.llap.zk.sm.session.timeout", "40s", new TimeValidator(
TimeUnit.MILLISECONDS), "ZooKeeper session timeout for ZK SecretManager."),
LLAP_ZK_REGISTRY_USER("hive.llap.zk.registry.user", "",
"In the LLAP ZooKeeper-based registry, specifies the username in the Zookeeper path.\n" +
"This should be the hive user or whichever user is running the LLAP daemon."),
LLAP_ZK_REGISTRY_NAMESPACE("hive.llap.zk.registry.namespace", null,
"In the LLAP ZooKeeper-based registry, overrides the ZK path namespace. Note that\n" +
"using this makes the path management (e.g. setting correct ACLs) your responsibility."),
// Note: do not rename to ..service.acl; Hadoop generates .hosts setting name from this,
// resulting in a collision with existing hive.llap.daemon.service.hosts and bizarre errors.
// These are read by Hadoop IPC, so you should check the usage and naming conventions (e.g.
// ".blocked" is a string hardcoded by Hadoop, and defaults are enforced elsewhere in Hive)
// before making changes or copy-pasting these.
LLAP_SECURITY_ACL("hive.llap.daemon.acl", "*", "The ACL for LLAP daemon."),
LLAP_SECURITY_ACL_DENY("hive.llap.daemon.acl.blocked", "", "The deny ACL for LLAP daemon."),
LLAP_MANAGEMENT_ACL("hive.llap.management.acl", "*", "The ACL for LLAP daemon management."),
LLAP_MANAGEMENT_ACL_DENY("hive.llap.management.acl.blocked", "",
"The deny ACL for LLAP daemon management."),
LLAP_PLUGIN_ACL("hive.llap.plugin.acl", "*", "The ACL for LLAP plugin AM endpoint."),
LLAP_PLUGIN_ACL_DENY("hive.llap.plugin.acl.blocked", "",
"The deny ACL for LLAP plugin AM endpoint."),
LLAP_REMOTE_TOKEN_REQUIRES_SIGNING("hive.llap.remote.token.requires.signing", "true",
new StringSet("false", "except_llap_owner", "true"),
"Whether the token returned from LLAP management API should require fragment signing.\n" +
"True by default; can be disabled to allow CLI to get tokens from LLAP in a secure\n" +
"cluster by setting it to true or 'except_llap_owner' (the latter returns such tokens\n" +
"to everyone except the user LLAP cluster is authenticating under)."),
// Hadoop DelegationTokenManager default is 1 week.
LLAP_DELEGATION_TOKEN_LIFETIME("hive.llap.daemon.delegation.token.lifetime", "14d",
new TimeValidator(TimeUnit.SECONDS),
"LLAP delegation token lifetime, in seconds if specified without a unit."),
LLAP_MANAGEMENT_RPC_PORT("hive.llap.management.rpc.port", 15004,
"RPC port for LLAP daemon management service."),
LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", false,
"Whether or not to set Hadoop configs to enable auth in LLAP web app."),
LLAP_DAEMON_RPC_NUM_HANDLERS("hive.llap.daemon.rpc.num.handlers", 5,
"Number of RPC handlers for LLAP daemon.", "llap.daemon.rpc.num.handlers"),
LLAP_PLUGIN_RPC_PORT("hive.llap.plugin.rpc.port", 0,
"Port to use for LLAP plugin rpc server"),
LLAP_PLUGIN_RPC_NUM_HANDLERS("hive.llap.plugin.rpc.num.handlers", 1,
"Number of RPC handlers for AM LLAP plugin endpoint."),
LLAP_DAEMON_WORK_DIRS("hive.llap.daemon.work.dirs", "",
"Working directories for the daemon. This should not be set if running as a YARN\n" +
"Service. It must be set when not running on YARN. If the value is set when\n" +
"running as a YARN Service, the specified value will be used.",
"llap.daemon.work.dirs"),
LLAP_DAEMON_YARN_SHUFFLE_PORT("hive.llap.daemon.yarn.shuffle.port", 15551,
"YARN shuffle port for LLAP-daemon-hosted shuffle.", "llap.daemon.yarn.shuffle.port"),
LLAP_DAEMON_YARN_CONTAINER_MB("hive.llap.daemon.yarn.container.mb", -1,
"llap server yarn container size in MB. Used in LlapServiceDriver and package.py", "llap.daemon.yarn.container.mb"),
LLAP_DAEMON_QUEUE_NAME("hive.llap.daemon.queue.name", null,
"Queue name within which the llap application will run." +
" Used in LlapServiceDriver and package.py"),
// TODO Move the following 2 properties out of Configuration to a constant.
LLAP_DAEMON_CONTAINER_ID("hive.llap.daemon.container.id", null,
"ContainerId of a running LlapDaemon. Used to publish to the registry"),
LLAP_DAEMON_NM_ADDRESS("hive.llap.daemon.nm.address", null,
"NM Address host:rpcPort for the NodeManager on which the instance of the daemon is running.\n" +
"Published to the llap registry. Should never be set by users"),
LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED("hive.llap.daemon.shuffle.dir.watcher.enabled", false,
"TODO doc", "llap.daemon.shuffle.dir-watcher.enabled"),
LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS(
"hive.llap.daemon.am.liveness.heartbeat.interval.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Tez AM-LLAP heartbeat interval (milliseconds). This needs to be below the task timeout\n" +
"interval, but otherwise as high as possible to avoid unnecessary traffic.",
"llap.daemon.am.liveness.heartbeat.interval-ms"),
LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS(
"hive.llap.am.liveness.connection.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Amount of time to wait on connection failures to the AM from an LLAP daemon before\n" +
"considering the AM to be dead.", "llap.am.liveness.connection.timeout-millis"),
LLAP_DAEMON_AM_USE_FQDN("hive.llap.am.use.fqdn", true,
"Whether to use FQDN of the AM machine when submitting work to LLAP."),
// Not used yet - since the Writable RPC engine does not support this policy.
LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.am.liveness.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration while waiting to retry connection failures to the AM from the daemon for\n" +
"the general keep-alive thread (milliseconds).",
"llap.am.liveness.connection.sleep-between-retries-millis"),
LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS(
"hive.llap.task.scheduler.timeout.seconds", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Amount of time to wait before failing the query when there are no llap daemons running\n" +
"(alive) in the cluster.", "llap.daemon.scheduler.timeout.seconds"),
LLAP_DAEMON_NUM_EXECUTORS("hive.llap.daemon.num.executors", 4,
"Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\n" +
"executed in parallel.", "llap.daemon.num.executors"),
LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR("hive.llap.mapjoin.memory.oversubscribe.factor", 0.2f,
"Fraction of memory from hive.auto.convert.join.noconditionaltask.size that can be over subscribed\n" +
"by queries running in LLAP mode. This factor has to be from 0.0 to 1.0. Default is 20% over subscription.\n"),
LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY("hive.llap.memory.oversubscription.max.executors.per.query",
-1,
"Used along with hive.llap.mapjoin.memory.oversubscribe.factor to limit the number of executors from\n" +
"which memory for mapjoin can be borrowed. Default 3 (from 3 other executors\n" +
"hive.llap.mapjoin.memory.oversubscribe.factor amount of memory can be borrowed based on which mapjoin\n" +
"conversion decision will be made). This is only an upper bound. Lower bound is determined by number of\n" +
"executors and configured max concurrency."),
LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL("hive.llap.mapjoin.memory.monitor.check.interval", 100000L,
"Check memory usage of mapjoin hash tables after every interval of this many rows. If map join hash table\n" +
"memory usage exceeds (hive.auto.convert.join.noconditionaltask.size * hive.hash.table.inflation.factor)\n" +
"when running in LLAP, tasks will get killed and not retried. Set the value to 0 to disable this feature."),
LLAP_DAEMON_AM_REPORTER_MAX_THREADS("hive.llap.daemon.am-reporter.max.threads", 4,
"Maximum number of threads to be used for AM reporter. If this is lower than number of\n" +
"executors in llap daemon, it would be set to number of executors at runtime.",
"llap.daemon.am-reporter.max.threads"),
LLAP_DAEMON_RPC_PORT("hive.llap.daemon.rpc.port", 0, "The LLAP daemon RPC port.",
"llap.daemon.rpc.port. A value of 0 indicates a dynamic port"),
LLAP_DAEMON_MEMORY_PER_INSTANCE_MB("hive.llap.daemon.memory.per.instance.mb", 4096,
"The total amount of memory to use for the executors inside LLAP (in megabytes).",
"llap.daemon.memory.per.instance.mb"),
LLAP_DAEMON_XMX_HEADROOM("hive.llap.daemon.xmx.headroom", "5%",
"The total amount of heap memory set aside by LLAP and not used by the executors. Can\n" +
"be specified as size (e.g. '512Mb'), or percentage (e.g. '5%'). Note that the latter is\n" +
"derived from the total daemon XMX, which can be different from the total executor\n" +
"memory if the cache is on-heap; although that's not the default configuration."),
LLAP_DAEMON_VCPUS_PER_INSTANCE("hive.llap.daemon.vcpus.per.instance", 4,
"The total number of vcpus to use for the executors inside LLAP.",
"llap.daemon.vcpus.per.instance"),
LLAP_DAEMON_NUM_FILE_CLEANER_THREADS("hive.llap.daemon.num.file.cleaner.threads", 1,
"Number of file cleaner threads in LLAP.", "llap.daemon.num.file.cleaner.threads"),
LLAP_FILE_CLEANUP_DELAY_SECONDS("hive.llap.file.cleanup.delay.seconds", "300s",
new TimeValidator(TimeUnit.SECONDS),
"How long to delay before cleaning up query files in LLAP (in seconds, for debugging).",
"llap.file.cleanup.delay-seconds"),
LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", null,
"Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\n" +
"YARN registry is used.", "llap.daemon.service.hosts"),
LLAP_DAEMON_SERVICE_REFRESH_INTERVAL("hive.llap.daemon.service.refresh.interval.sec", "60s",
new TimeValidator(TimeUnit.SECONDS),
"LLAP YARN registry service list refresh delay, in seconds.",
"llap.daemon.service.refresh.interval"),
LLAP_DAEMON_COMMUNICATOR_NUM_THREADS("hive.llap.daemon.communicator.num.threads", 10,
"Number of threads to use in LLAP task communicator in Tez AM.",
"llap.daemon.communicator.num.threads"),
LLAP_PLUGIN_CLIENT_NUM_THREADS("hive.llap.plugin.client.num.threads", 10,
"Number of threads to use in LLAP task plugin client."),
LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS("hive.llap.daemon.download.permanent.fns", false,
"Whether LLAP daemon should localize the resources for permanent UDFs."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME("hive.llap.task.scheduler.am.registry", "llap",
"AM registry name for LLAP task scheduler plugin to register with."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL("hive.llap.task.scheduler.am.registry.principal", "",
"The name of the principal used to access ZK AM registry securely."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE("hive.llap.task.scheduler.am.registry.keytab.file", "",
"The path to the Kerberos keytab file used to access ZK AM registry securely."),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Minimum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.min.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.max.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.max.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR(
"hive.llap.task.scheduler.node.disable.backoff.factor", 1.5f,
"Backoff factor on successive blacklists of a node due to some failures. Blacklist times\n" +
"start at the min timeout and go up to the max timeout based on this backoff factor.",
"llap.task.scheduler.node.disable.backoff.factor"),
LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT("hive.llap.task.scheduler.preempt.independent", false,
"Whether the AM LLAP scheduler should preempt a lower priority task for a higher pri one\n" +
"even if the former doesn't depend on the latter (e.g. for two parallel sides of a union)."),
LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE(
"hive.llap.task.scheduler.num.schedulable.tasks.per.node", 0,
"The number of tasks the AM TaskScheduler will try allocating per node. 0 indicates that\n" +
"this should be picked up from the Registry. -1 indicates unlimited capacity; positive\n" +
"values indicate a specific bound.", "llap.task.scheduler.num.schedulable.tasks.per.node"),
LLAP_TASK_SCHEDULER_LOCALITY_DELAY(
"hive.llap.task.scheduler.locality.delay", "0ms",
new TimeValidator(TimeUnit.MILLISECONDS, -1l, true, Long.MAX_VALUE, true),
"Amount of time to wait before allocating a request which contains location information," +
" to a location other than the ones requested. Set to -1 for an infinite delay, 0" +
"for no delay."
),
LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS(
"hive.llap.daemon.task.preemption.metrics.intervals", "30,60,300",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
" for percentile latency metrics. Used by LLAP daemon task scheduler metrics for\n" +
" time taken to kill task (due to pre-emption) and useful time wasted by the task that\n" +
" is about to be preempted."
),
LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE("hive.llap.daemon.task.scheduler.wait.queue.size",
10, "LLAP scheduler maximum queue size.", "llap.daemon.task.scheduler.wait.queue.size"),
LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME(
"hive.llap.daemon.wait.queue.comparator.class.name",
"org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator",
"The priority comparator to use for LLAP scheduler priority queue. The built-in options\n" +
"are org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\n" +
".....FirstInFirstOutComparator", "llap.daemon.wait.queue.comparator.class.name"),
LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION(
"hive.llap.daemon.task.scheduler.enable.preemption", true,
"Whether non-finishable running tasks (e.g. a reducer waiting for inputs) should be\n" +
"preempted by finishable tasks inside LLAP scheduler.",
"llap.daemon.task.scheduler.enable.preemption"),
LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS(
"hive.llap.task.communicator.connection.timeout.ms", "16000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Connection timeout (in milliseconds) before a failure to an LLAP daemon from Tez AM.",
"llap.task.communicator.connection.timeout-millis"),
LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT(
"hive.llap.task.communicator.listener.thread-count", 30,
"The number of task communicator listener threads."),
LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.task.communicator.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration (in milliseconds) to wait before retrying on error when obtaining a\n" +
"connection to LLAP daemon from Tez AM.",
"llap.task.communicator.connection.sleep-between-retries-millis"),
LLAP_TASK_UMBILICAL_SERVER_PORT("hive.llap.daemon.umbilical.port", 0,
"LLAP task umbilical server RPC port"),
LLAP_DAEMON_WEB_PORT("hive.llap.daemon.web.port", 15002, "LLAP daemon web UI port.",
"llap.daemon.service.port"),
LLAP_DAEMON_WEB_SSL("hive.llap.daemon.web.ssl", false,
"Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl"),
LLAP_CLIENT_CONSISTENT_SPLITS("hive.llap.client.consistent.splits", true,
"Whether to setup split locations to match nodes on which llap daemons are running, " +
"preferring one of the locations provided by the split itself. If there is no llap daemon " +
"running on any of those locations (or on the cloud), fall back to a cache affinity to" +
" an LLAP node. This is effective only if hive.execution.mode is llap."),
LLAP_VALIDATE_ACLS("hive.llap.validate.acls", true,
"Whether LLAP should reject permissive ACLs in some cases (e.g. its own management\n" +
"protocol or ZK paths), similar to how ssh refuses a key with bad access permissions."),
LLAP_DAEMON_OUTPUT_SERVICE_PORT("hive.llap.daemon.output.service.port", 15003,
"LLAP daemon output service port"),
LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT("hive.llap.daemon.output.stream.timeout", "120s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for the client to connect to LLAP output service and start the fragment\n" +
"output after sending the fragment. The fragment will fail if its output is not claimed."),
LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE("hive.llap.daemon.output.service.send.buffer.size",
128 * 1024, "Send buffer size to be used by LLAP daemon output service"),
LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES("hive.llap.daemon.output.service.max.pending.writes",
8, "Maximum number of queued writes allowed per connection when sending data\n" +
" via the LLAP output service to external clients."),
LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT("hive.llap.external.splits.temp.table.storage.format",
"orc", new StringSet("default", "text", "orc"),
"Storage format for temp tables created using LLAP external client"),
LLAP_EXTERNAL_SPLITS_ORDER_BY_FORCE_SINGLE_SPLIT("hive.llap.external.splits.order.by.force.single.split",
true,
"If LLAP external clients submits ORDER BY queries, force return a single split to guarantee reading\n" +
"data out in ordered way. Setting this to false will let external clients read data out in parallel\n" +
"losing the ordering (external clients are responsible for guaranteeing the ordering)"),
LLAP_ENABLE_GRACE_JOIN_IN_LLAP("hive.llap.enable.grace.join.in.llap", false,
"Override if grace join should be allowed to run in llap."),
LLAP_HS2_ENABLE_COORDINATOR("hive.llap.hs2.coordinator.enabled", true,
"Whether to create the LLAP coordinator; since execution engine and container vs llap\n" +
"settings are both coming from job configs, we don't know at start whether this should\n" +
"be created. Default true."),
LLAP_DAEMON_LOGGER("hive.llap.daemon.logger", Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
new StringSet(Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
Constants.LLAP_LOGGER_NAME_RFA,
Constants.LLAP_LOGGER_NAME_CONSOLE),
"logger used for llap-daemons."),
LLAP_OUTPUT_FORMAT_ARROW("hive.llap.output.format.arrow", true,
"Whether LLapOutputFormatService should output arrow batches"),
LLAP_COLLECT_LOCK_METRICS("hive.llap.lockmetrics.collect", false,
"Whether lock metrics (wait times, counts) are collected for LLAP "
+ "related locks"),
HIVE_TRIGGER_VALIDATION_INTERVAL("hive.trigger.validation.interval", "500ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Interval for validating triggers during execution of a query. Triggers defined in resource plan will get\n" +
"validated for all SQL operations after every defined interval (default: 500ms) and corresponding action\n" +
"defined in the trigger will be taken"),
SPARK_USE_OP_STATS("hive.spark.use.op.stats", true,
"Whether to use operator stats to determine reducer parallelism for Hive on Spark.\n" +
"If this is false, Hive will use source table stats to determine reducer\n" +
"parallelism for all first level reduce tasks, and the maximum reducer parallelism\n" +
"from all parents for all the rest (second level and onward) reducer tasks."),
SPARK_USE_TS_STATS_FOR_MAPJOIN("hive.spark.use.ts.stats.for.mapjoin", false,
"If this is set to true, mapjoin optimization in Hive/Spark will use statistics from\n" +
"TableScan operators at the root of operator tree, instead of parent ReduceSink\n" +
"operators of the Join operator."),
SPARK_OPTIMIZE_SHUFFLE_SERDE("hive.spark.optimize.shuffle.serde", true,
"If this is set to true, Hive on Spark will register custom serializers for data types\n" +
"in shuffle. This should result in less shuffled data."),
SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for requests between Hive client and remote Spark driver."),
SPARK_JOB_MONITOR_TIMEOUT("hive.spark.job.monitor.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for job monitor to get Spark job state."),
SPARK_RPC_CLIENT_CONNECT_TIMEOUT("hive.spark.client.connect.timeout",
"1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for remote Spark driver in connecting back to Hive client."),
SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT("hive.spark.client.server.connect.timeout",
"90000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for handshake between Hive client and remote Spark driver. Checked by both processes."),
SPARK_RPC_SECRET_RANDOM_BITS("hive.spark.client.secret.bits", "256",
"Number of bits of randomness in the generated secret for communication between Hive client and remote Spark driver. " +
"Rounded down to the nearest multiple of 8."),
SPARK_RPC_MAX_THREADS("hive.spark.client.rpc.threads", 8,
"Maximum number of threads for remote Spark driver's RPC event loop."),
SPARK_RPC_MAX_MESSAGE_SIZE("hive.spark.client.rpc.max.size", 50 * 1024 * 1024,
"Maximum message size in bytes for communication between Hive client and remote Spark driver. Default is 50MB."),
SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
"Channel logging level for remote Spark driver. One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
"Name of the SASL mechanism to use for authentication."),
SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
"The server address of HiverServer2 host to be used for communication between Hive client and remote Spark driver. " +
"Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host." +
"This is only necessary if the host has multiple network addresses and if a different network address other than " +
"hive.server2.thrift.bind.host is to be used."),
SPARK_RPC_SERVER_PORT("hive.spark.client.rpc.server.port", "", "A list of port ranges which can be used by RPC server " +
"with the format of 49152-49222,49228 and a random one is selected from the list. Default is empty, which randomly " +
"selects one port from all available ones."),
SPARK_DYNAMIC_PARTITION_PRUNING(
"hive.spark.dynamic.partition.pruning", false,
"When dynamic pruning is enabled, joins on partition keys will be processed by writing\n" +
"to a temporary HDFS file, and read later for removing unnecessary partitions."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE(
"hive.spark.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size in dynamic pruning."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY(
"hive.spark.dynamic.partition.pruning.map.join.only", false,
"Turn on dynamic partition pruning only for map joins.\n" +
"If hive.spark.dynamic.partition.pruning is set to true, this parameter value is ignored."),
SPARK_USE_GROUPBY_SHUFFLE(
"hive.spark.use.groupby.shuffle", true,
"Spark groupByKey transformation has better performance but uses unbounded memory." +
"Turn this off when there is a memory issue."),
SPARK_JOB_MAX_TASKS("hive.spark.job.max.tasks", -1, "The maximum number of tasks a Spark job may have.\n" +
"If a Spark job contains more tasks than the maximum, it will be cancelled. A value of -1 means no limit."),
SPARK_STAGE_MAX_TASKS("hive.spark.stage.max.tasks", -1, "The maximum number of tasks a stage in a Spark job may have.\n" +
"If a Spark job stage contains more tasks than the maximum, the job will be cancelled. A value of -1 means no limit."),
SPARK_CLIENT_TYPE("hive.spark.client.type", HIVE_SPARK_SUBMIT_CLIENT,
"Controls how the Spark application is launched. If " + HIVE_SPARK_SUBMIT_CLIENT + " is " +
"specified (default) then the spark-submit shell script is used to launch the Spark " +
"app. If " + HIVE_SPARK_LAUNCHER_CLIENT + " is specified then Spark's " +
"InProcessLauncher is used to programmatically launch the app."),
SPARK_SESSION_TIMEOUT("hive.spark.session.timeout", "30m", new TimeValidator(TimeUnit.MINUTES,
30L, true, null, true), "Amount of time the Spark Remote Driver should wait for " +
" a Spark job to be submitted before shutting down. Minimum value is 30 minutes"),
SPARK_SESSION_TIMEOUT_PERIOD("hive.spark.session.timeout.period", "60s",
new TimeValidator(TimeUnit.SECONDS, 60L, true, null, true),
"How frequently to check for idle Spark sessions. Minimum value is 60 seconds."),
NWAYJOINREORDER("hive.reorder.nway.joins", true,
"Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", false,
"Merge adjacent joins into a single n-way join"),
HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null),
"If value is greater than 0 logs in fixed intervals of size n rather than exponentially."),
/**
* @deprecated Use MetastoreConf.MSCK_PATH_VALIDATION
*/
@Deprecated
HIVE_MSCK_PATH_VALIDATION("hive.msck.path.validation", "throw",
new StringSet("throw", "skip", "ignore"), "The approach msck should take with HDFS " +
"directories that are partition-like but contain unsupported characters. 'throw' (an " +
"exception) is the default; 'skip' will skip the invalid directories and still repair the" +
" others; 'ignore' will skip the validation (legacy behavior, causes bugs in many cases)"),
/**
* @deprecated Use MetastoreConf.MSCK_REPAIR_BATCH_SIZE
*/
@Deprecated
HIVE_MSCK_REPAIR_BATCH_SIZE(
"hive.msck.repair.batch.size", 3000,
"Batch size for the msck repair command. If the value is greater than zero,\n "
+ "it will execute batch wise with the configured batch size. In case of errors while\n"
+ "adding unknown partitions the batch size is automatically reduced by half in the subsequent\n"
+ "retry attempt. The default value is 3000 which means it will execute in the batches of 3000."),
/**
* @deprecated Use MetastoreConf.MSCK_REPAIR_BATCH_MAX_RETRIES
*/
@Deprecated
HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES("hive.msck.repair.batch.max.retries", 4,
"Maximum number of retries for the msck repair command when adding unknown partitions.\n "
+ "If the value is greater than zero it will retry adding unknown partitions until the maximum\n"
+ "number of attempts is reached or batch size is reduced to 0, whichever is earlier.\n"
+ "In each retry attempt it will reduce the batch size by a factor of 2 until it reaches zero.\n"
+ "If the value is set to zero it will retry until the batch size becomes zero as described above."),
HIVE_SERVER2_LLAP_CONCURRENT_QUERIES("hive.server2.llap.concurrent.queries", -1,
"The number of queries allowed in parallel via llap. Negative number implies 'infinite'."),
HIVE_TEZ_ENABLE_MEMORY_MANAGER("hive.tez.enable.memory.manager", true,
"Enable memory manager for tez"),
HIVE_HASH_TABLE_INFLATION_FACTOR("hive.hash.table.inflation.factor", (float) 2.0,
"Expected inflation factor between disk/in memory representation of hash tables"),
HIVE_LOG_TRACE_ID("hive.log.trace.id", "",
"Log tracing id that can be used by upstream clients for tracking respective logs. " +
"Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."),
HIVE_MM_AVOID_GLOBSTATUS_ON_S3("hive.mm.avoid.s3.globstatus", true,
"Whether to use listFiles (optimized on S3) instead of globStatus when on S3."),
// If a parameter is added to the restricted list, add a test in TestRestrictedList.Java
HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
"hive.security.authenticator.manager,hive.security.authorization.manager," +
"hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager," +
"hive.users.in.admin.role,hive.server2.xsrf.filter.enabled,hive.security.authorization.enabled," +
"hive.distcp.privileged.doAs," +
"hive.server2.authentication.ldap.baseDN," +
"hive.server2.authentication.ldap.url," +
"hive.server2.authentication.ldap.Domain," +
"hive.server2.authentication.ldap.groupDNPattern," +
"hive.server2.authentication.ldap.groupFilter," +
"hive.server2.authentication.ldap.userDNPattern," +
"hive.server2.authentication.ldap.userFilter," +
"hive.server2.authentication.ldap.groupMembershipKey," +
"hive.server2.authentication.ldap.userMembershipKey," +
"hive.server2.authentication.ldap.groupClassKey," +
"hive.server2.authentication.ldap.customLDAPQuery," +
"hive.privilege.synchronizer," +
"hive.privilege.synchronizer.interval," +
"hive.spark.client.connect.timeout," +
"hive.spark.client.server.connect.timeout," +
"hive.spark.client.channel.log.level," +
"hive.spark.client.rpc.max.size," +
"hive.spark.client.rpc.threads," +
"hive.spark.client.secret.bits," +
"hive.spark.client.rpc.server.address," +
"hive.spark.client.rpc.server.port," +
"hive.spark.client.rpc.sasl.mechanisms," +
"bonecp.,"+
"hive.druid.broker.address.default,"+
"hive.druid.coordinator.address.default,"+
"hikaricp.,"+
"hadoop.bin.path,"+
"yarn.bin.path,"+
"spark.home,"+
"hive.driver.parallel.compilation.global.limit",
"Comma separated list of configuration options which are immutable at runtime"),
HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list",
METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname
+ "," + DRUID_METADATA_DB_PASSWORD.varname
// Adding the S3 credentials from Hadoop config to be hidden
+ ",fs.s3.awsAccessKeyId"
+ ",fs.s3.awsSecretAccessKey"
+ ",fs.s3n.awsAccessKeyId"
+ ",fs.s3n.awsSecretAccessKey"
+ ",fs.s3a.access.key"
+ ",fs.s3a.secret.key"
+ ",fs.s3a.proxy.password"
+ ",dfs.adls.oauth2.credential"
+ ",fs.adl.oauth2.credential",
"Comma separated list of configuration options which should not be read by normal user like passwords"),
HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
"hive.added.files.path,hive.added.jars.path,hive.added.archives.path",
"Comma separated list of variables which are used internally and should not be configurable."),
HIVE_SPARK_RSC_CONF_LIST("hive.spark.rsc.conf.list",
SPARK_OPTIMIZE_SHUFFLE_SERDE.varname + "," +
SPARK_CLIENT_FUTURE_TIMEOUT.varname + "," +
SPARK_CLIENT_TYPE.varname,
"Comma separated list of variables which are related to remote spark context.\n" +
"Changing these variables will result in re-creating the spark session."),
HIVE_QUERY_TIMEOUT_SECONDS("hive.query.timeout.seconds", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for Running Query in seconds. A nonpositive value means infinite. " +
"If the query timeout is also set by thrift API call, the smaller one will be taken."),
HIVE_EXEC_INPUT_LISTING_MAX_THREADS("hive.exec.input.listing.max.threads", 0, new SizeValidator(0L, true, 1024L, true),
"Maximum number of threads that Hive uses to list file information from file systems (recommended > 1 for blobstore)."),
HIVE_QUERY_REEXECUTION_ENABLED("hive.query.reexecution.enabled", true,
"Enable query reexecutions"),
HIVE_QUERY_REEXECUTION_STRATEGIES("hive.query.reexecution.strategies", "overlay,reoptimize",
"comma separated list of plugin can be used:\n"
+ " overlay: hiveconf subtree 'reexec.overlay' is used as an overlay in case of an execution errors out\n"
+ " reoptimize: collects operator statistics during execution and recompile the query after a failure"),
HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE("hive.query.reexecution.stats.persist.scope", "query",
new StringSet("query", "hiveserver", "metastore"),
"Sets the persistence scope of runtime statistics\n"
+ " query: runtime statistics are only used during re-execution\n"
+ " hiveserver: runtime statistics are persisted in the hiveserver - all sessions share it\n"
+ " metastore: runtime statistics are persisted in the metastore as well"),
HIVE_QUERY_MAX_REEXECUTION_COUNT("hive.query.reexecution.max.count", 1,
"Maximum number of re-executions for a single query."),
HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS("hive.query.reexecution.always.collect.operator.stats", false,
"If sessionstats are enabled; this option can be used to collect statistics all the time"),
HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE("hive.query.reexecution.stats.cache.batch.size", -1,
"If runtime stats are stored in metastore; the maximal batch size per round during load."),
HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE("hive.query.reexecution.stats.cache.size", 100_000,
"Size of the runtime statistics cache. Unit is: OperatorStat entry; a query plan consist ~100."),
HIVE_QUERY_RESULTS_CACHE_ENABLED("hive.query.results.cache.enabled", true,
"If the query results cache is enabled. This will keep results of previously executed queries " +
"to be reused if the same query is executed again."),
HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED("hive.query.results.cache.nontransactional.tables.enabled", false,
"If the query results cache is enabled for queries involving non-transactional tables." +
"Users who enable this setting should be willing to tolerate some amount of stale results in the cache."),
HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS("hive.query.results.cache.wait.for.pending.results", true,
"Should a query wait for the pending results of an already running query, " +
"in order to use the cached result when it becomes ready"),
HIVE_QUERY_RESULTS_CACHE_DIRECTORY("hive.query.results.cache.directory",
"/tmp/hive/_resultscache_",
"Location of the query results cache directory. Temporary results from queries " +
"will be moved to this location."),
HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME("hive.query.results.cache.max.entry.lifetime", "3600s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum lifetime in seconds for an entry in the query results cache. A nonpositive value means infinite."),
HIVE_QUERY_RESULTS_CACHE_MAX_SIZE("hive.query.results.cache.max.size",
(long) 2 * 1024 * 1024 * 1024,
"Maximum total size in bytes that the query results cache directory is allowed to use on the filesystem."),
HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE("hive.query.results.cache.max.entry.size",
(long) 10 * 1024 * 1024,
"Maximum size in bytes that a single query result is allowed to use in the results cache directory"),
HIVE_NOTFICATION_EVENT_POLL_INTERVAL("hive.notification.event.poll.interval", "60s",
new TimeValidator(TimeUnit.SECONDS),
"How often the notification log is polled for new NotificationEvents from the metastore." +
"A nonpositive value means the notification log is never polled."),
HIVE_NOTFICATION_EVENT_CONSUMERS("hive.notification.event.consumers",
"org.apache.hadoop.hive.ql.cache.results.QueryResultsCache$InvalidationEventConsumer",
"Comma-separated list of class names extending EventConsumer," +
"to handle the NotificationEvents retreived by the notification event poll."),
/* BLOBSTORE section */
HIVE_BLOBSTORE_SUPPORTED_SCHEMES("hive.blobstore.supported.schemes", "s3,s3a,s3n",
"Comma-separated list of supported blobstore schemes."),
HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR("hive.blobstore.use.blobstore.as.scratchdir", false,
"Enable the use of scratch directories directly on blob storage systems (it may cause performance penalties)."),
HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED("hive.blobstore.optimizations.enabled", true,
"This parameter enables a number of optimizations when running on blobstores:\n" +
"(1) If hive.blobstore.use.blobstore.as.scratchdir is false, force the last Hive job to write to the blobstore.\n" +
"This is a performance optimization that forces the final FileSinkOperator to write to the blobstore.\n" +
"See HIVE-15121 for details."),
HIVE_ADDITIONAL_CONFIG_FILES("hive.additional.config.files", "",
"The names of additional config files, such as ldap-site.xml," +
"spark-site.xml, etc in comma separated list.");
public final String varname;
public final String altName;
private final String defaultExpr;
public final String defaultStrVal;
public final int defaultIntVal;
public final long defaultLongVal;
public final float defaultFloatVal;
public final boolean defaultBoolVal;
private final Class<?> valClass;
private final VarType valType;
private final Validator validator;
private final String description;
private final boolean excluded;
private final boolean caseSensitive;
ConfVars(String varname, Object defaultVal, String description) {
this(varname, defaultVal, null, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, String description, String altName) {
this(varname, defaultVal, null, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
String altName) {
this(varname, defaultVal, validator, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, String description, boolean excluded) {
this(varname, defaultVal, null, description, true, excluded, null);
}
ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) {
this(varname, defaultVal, null, description, caseSensitive, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description) {
this(varname, defaultVal, validator, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean excluded) {
this(varname, defaultVal, validator, description, true, excluded, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
boolean caseSensitive, boolean excluded, String altName) {
this.varname = varname;
this.validator = validator;
this.description = description;
this.defaultExpr = defaultVal == null ? null : String.valueOf(defaultVal);
this.excluded = excluded;
this.caseSensitive = caseSensitive;
this.altName = altName;
if (defaultVal == null || defaultVal instanceof String) {
this.valClass = String.class;
this.valType = VarType.STRING;
this.defaultStrVal = SystemVariables.substitute((String)defaultVal);
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Integer) {
this.valClass = Integer.class;
this.valType = VarType.INT;
this.defaultStrVal = null;
this.defaultIntVal = (Integer)defaultVal;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Long) {
this.valClass = Long.class;
this.valType = VarType.LONG;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = (Long)defaultVal;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Float) {
this.valClass = Float.class;
this.valType = VarType.FLOAT;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = (Float)defaultVal;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Boolean) {
this.valClass = Boolean.class;
this.valType = VarType.BOOLEAN;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = (Boolean)defaultVal;
} else {
throw new IllegalArgumentException("Not supported type value " + defaultVal.getClass() +
" for name " + varname);
}
}
public boolean isType(String value) {
return valType.isType(value);
}
public Validator getValidator() {
return validator;
}
public String validate(String value) {
return validator == null ? null : validator.validate(value);
}
public String validatorDescription() {
return validator == null ? null : validator.toDescription();
}
public String typeString() {
String type = valType.typeString();
if (valType == VarType.STRING && validator != null) {
if (validator instanceof TimeValidator) {
type += "(TIME)";
}
}
return type;
}
public String getRawDescription() {
return description;
}
public String getDescription() {
String validator = validatorDescription();
if (validator != null) {
return validator + ".\n" + description;
}
return description;
}
public boolean isExcluded() {
return excluded;
}
public boolean isCaseSensitive() {
return caseSensitive;
}
@Override
public String toString() {
return varname;
}
private static String findHadoopBinary() {
String val = findHadoopHome();
// if we can't find hadoop home we can at least try /usr/bin/hadoop
val = (val == null ? File.separator + "usr" : val)
+ File.separator + "bin" + File.separator + "hadoop";
// Launch hadoop command file on windows.
return val;
}
private static String findYarnBinary() {
String val = findHadoopHome();
val = (val == null ? "yarn" : val + File.separator + "bin" + File.separator + "yarn");
return val;
}
private static String findMapRedBinary() {
String val = findHadoopHome();
val = (val == null ? "mapred" : val + File.separator + "bin" + File.separator + "mapred");
return val;
}
private static String findHadoopHome() {
String val = System.getenv("HADOOP_HOME");
// In Hadoop 1.X and Hadoop 2.X HADOOP_HOME is gone and replaced with HADOOP_PREFIX
if (val == null) {
val = System.getenv("HADOOP_PREFIX");
}
return val;
}
public String getDefaultValue() {
return valType.defaultValueString(this);
}
public String getDefaultExpr() {
return defaultExpr;
}
private Set<String> getValidStringValues() {
if (validator == null || !(validator instanceof StringSet)) {
throw new RuntimeException(varname + " does not specify a list of valid values");
}
return ((StringSet)validator).getExpected();
}
enum VarType {
STRING {
@Override
void checkType(String value) throws Exception { }
@Override
String defaultValueString(ConfVars confVar) { return confVar.defaultStrVal; }
},
INT {
@Override
void checkType(String value) throws Exception { Integer.valueOf(value); }
},
LONG {
@Override
void checkType(String value) throws Exception { Long.valueOf(value); }
},
FLOAT {
@Override
void checkType(String value) throws Exception { Float.valueOf(value); }
},
BOOLEAN {
@Override
void checkType(String value) throws Exception { Boolean.valueOf(value); }
};
boolean isType(String value) {
try { checkType(value); } catch (Exception e) { return false; }
return true;
}
String typeString() { return name().toUpperCase();}
String defaultValueString(ConfVars confVar) { return confVar.defaultExpr; }
abstract void checkType(String value) throws Exception;
}
}
/**
* Writes the default ConfVars out to a byte array and returns an input
* stream wrapping that byte array.
*
* We need this in order to initialize the ConfVar properties
* in the underling Configuration object using the addResource(InputStream)
* method.
*
* It is important to use a LoopingByteArrayInputStream because it turns out
* addResource(InputStream) is broken since Configuration tries to read the
* entire contents of the same InputStream repeatedly without resetting it.
* LoopingByteArrayInputStream has special logic to handle this.
*/
private static synchronized InputStream getConfVarInputStream() {
if (confVarByteArray == null) {
try {
// Create a Hadoop configuration without inheriting default settings.
Configuration conf = new Configuration(false);
applyDefaultNonNullConfVars(conf);
ByteArrayOutputStream confVarBaos = new ByteArrayOutputStream();
conf.writeXml(confVarBaos);
confVarByteArray = confVarBaos.toByteArray();
} catch (Exception e) {
// We're pretty screwed if we can't load the default conf vars
throw new RuntimeException("Failed to initialize default Hive configuration variables!", e);
}
}
return new LoopingByteArrayInputStream(confVarByteArray);
}
public void verifyAndSet(String name, String value) throws IllegalArgumentException {
if (modWhiteListPattern != null) {
Matcher wlMatcher = modWhiteListPattern.matcher(name);
if (!wlMatcher.matches()) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. "
+ "It is not in list of params that are allowed to be modified at runtime");
}
}
if (Iterables.any(restrictList,
restrictedVar -> name != null && name.startsWith(restrictedVar))) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
+ " of parameters that can't be modified at runtime or is prefixed by a restricted variable");
}
String oldValue = name != null ? get(name) : null;
if (name == null || value == null || !value.equals(oldValue)) {
// When either name or value is null, the set method below will fail,
// and throw IllegalArgumentException
set(name, value);
if (isSparkRelatedConfig(name)) {
isSparkConfigUpdated = true;
}
}
}
public boolean isHiddenConfig(String name) {
return Iterables.any(hiddenSet, hiddenVar -> name.startsWith(hiddenVar));
}
public static boolean isEncodedPar(String name) {
for (ConfVars confVar : HiveConf.ENCODED_CONF) {
ConfVars confVar1 = confVar;
if (confVar1.varname.equals(name)) {
return true;
}
}
return false;
}
/**
* check whether spark related property is updated, which includes spark configurations,
* RSC configurations and yarn configuration in Spark on YARN mode.
* @param name
* @return
*/
private boolean isSparkRelatedConfig(String name) {
boolean result = false;
if (name.startsWith("spark")) { // Spark property.
// for now we don't support changing spark app name on the fly
result = !name.equals("spark.app.name");
} else if (name.startsWith("yarn")) { // YARN property in Spark on YARN mode.
String sparkMaster = get("spark.master");
if (sparkMaster != null && sparkMaster.startsWith("yarn")) {
result = true;
}
} else if (rscList.stream().anyMatch(rscVar -> rscVar.equals(name))) { // Remote Spark Context property.
result = true;
} else if (name.equals("mapreduce.job.queuename")) {
// a special property starting with mapreduce that we would also like to effect if it changes
result = true;
}
return result;
}
public static int getIntVar(Configuration conf, ConfVars var) {
assert (var.valClass == Integer.class) : var.varname;
if (var.altName != null) {
return conf.getInt(var.varname, conf.getInt(var.altName, var.defaultIntVal));
}
return conf.getInt(var.varname, var.defaultIntVal);
}
public static void setIntVar(Configuration conf, ConfVars var, int val) {
assert (var.valClass == Integer.class) : var.varname;
conf.setInt(var.varname, val);
}
public int getIntVar(ConfVars var) {
return getIntVar(this, var);
}
public void setIntVar(ConfVars var, int val) {
setIntVar(this, var, val);
}
public static long getTimeVar(Configuration conf, ConfVars var, TimeUnit outUnit) {
return toTime(getVar(conf, var), getDefaultTimeUnit(var), outUnit);
}
public static void setTimeVar(Configuration conf, ConfVars var, long time, TimeUnit timeunit) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, time + stringFor(timeunit));
}
public long getTimeVar(ConfVars var, TimeUnit outUnit) {
return getTimeVar(this, var, outUnit);
}
public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) {
setTimeVar(this, var, time, outUnit);
}
public static long getSizeVar(Configuration conf, ConfVars var) {
return toSizeBytes(getVar(conf, var));
}
public long getSizeVar(ConfVars var) {
return getSizeVar(this, var);
}
public static TimeUnit getDefaultTimeUnit(ConfVars var) {
TimeUnit inputUnit = null;
if (var.validator instanceof TimeValidator) {
inputUnit = ((TimeValidator)var.validator).getTimeUnit();
}
return inputUnit;
}
public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return outUnit.convert(Long.parseLong(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit));
}
public static long toSizeBytes(String value) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return Long.parseLong(parsed[0].trim()) * multiplierFor(parsed[1].trim());
}
private static String[] parseNumberFollowedByUnit(String value) {
char[] chars = value.toCharArray();
int i = 0;
for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) {
}
return new String[] {value.substring(0, i), value.substring(i)};
}
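// Editor's illustration (not part of upstream Hive; assumed behavior of the helpers above,
// given the unit tables defined just below):
//   parseNumberFollowedByUnit("30s")            -> {"30", "s"}
//   toTime("30s", null, TimeUnit.MILLISECONDS)  -> 30000L
//   toSizeBytes("10mb")                         -> 10L * 1024 * 1024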
private static Set<String> daysSet = ImmutableSet.of("d", "D", "day", "DAY", "days", "DAYS");
private static Set<String> hoursSet = ImmutableSet.of("h", "H", "hour", "HOUR", "hours", "HOURS");
private static Set<String> minutesSet = ImmutableSet.of("m", "M", "min", "MIN", "mins", "MINS",
"minute", "MINUTE", "minutes", "MINUTES");
private static Set<String> secondsSet = ImmutableSet.of("s", "S", "sec", "SEC", "secs", "SECS",
"second", "SECOND", "seconds", "SECONDS");
private static Set<String> millisSet = ImmutableSet.of("ms", "MS", "msec", "MSEC", "msecs", "MSECS",
"millisecond", "MILLISECOND", "milliseconds", "MILLISECONDS");
private static Set<String> microsSet = ImmutableSet.of("us", "US", "usec", "USEC", "usecs", "USECS",
"microsecond", "MICROSECOND", "microseconds", "MICROSECONDS");
private static Set<String> nanosSet = ImmutableSet.of("ns", "NS", "nsec", "NSEC", "nsecs", "NSECS",
"nanosecond", "NANOSECOND", "nanoseconds", "NANOSECONDS");
public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("l")) {
if (defaultUnit == null) {
throw new IllegalArgumentException("Time unit is not specified");
}
return defaultUnit;
} else if (daysSet.contains(unit)) {
return TimeUnit.DAYS;
} else if (hoursSet.contains(unit)) {
return TimeUnit.HOURS;
} else if (minutesSet.contains(unit)) {
return TimeUnit.MINUTES;
} else if (secondsSet.contains(unit)) {
return TimeUnit.SECONDS;
} else if (millisSet.contains(unit)) {
return TimeUnit.MILLISECONDS;
} else if (microsSet.contains(unit)) {
return TimeUnit.MICROSECONDS;
} else if (nanosSet.contains(unit)) {
return TimeUnit.NANOSECONDS;
}
throw new IllegalArgumentException("Invalid time unit " + unit);
}
public static long multiplierFor(String unit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) {
return 1;
} else if (unit.equals("kb")) {
return 1024;
} else if (unit.equals("mb")) {
return 1024*1024;
} else if (unit.equals("gb")) {
return 1024*1024*1024;
} else if (unit.equals("tb")) {
return 1024L*1024*1024*1024;
} else if (unit.equals("pb")) {
return 1024L*1024*1024*1024*1024;
}
throw new IllegalArgumentException("Invalid size unit " + unit);
}
public static String stringFor(TimeUnit timeunit) {
switch (timeunit) {
case DAYS: return "day";
case HOURS: return "hour";
case MINUTES: return "min";
case SECONDS: return "sec";
case MILLISECONDS: return "msec";
case MICROSECONDS: return "usec";
case NANOSECONDS: return "nsec";
}
throw new IllegalArgumentException("Invalid timeunit " + timeunit);
}
public static long getLongVar(Configuration conf, ConfVars var) {
assert (var.valClass == Long.class) : var.varname;
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, var.defaultLongVal));
}
return conf.getLong(var.varname, var.defaultLongVal);
}
public static long getLongVar(Configuration conf, ConfVars var, long defaultVal) {
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, defaultVal));
}
return conf.getLong(var.varname, defaultVal);
}
public static void setLongVar(Configuration conf, ConfVars var, long val) {
assert (var.valClass == Long.class) : var.varname;
conf.setLong(var.varname, val);
}
public long getLongVar(ConfVars var) {
return getLongVar(this, var);
}
public void setLongVar(ConfVars var, long val) {
setLongVar(this, var, val);
}
public static float getFloatVar(Configuration conf, ConfVars var) {
assert (var.valClass == Float.class) : var.varname;
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, var.defaultFloatVal));
}
return conf.getFloat(var.varname, var.defaultFloatVal);
}
public static float getFloatVar(Configuration conf, ConfVars var, float defaultVal) {
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, defaultVal));
}
return conf.getFloat(var.varname, defaultVal);
}
public static void setFloatVar(Configuration conf, ConfVars var, float val) {
assert (var.valClass == Float.class) : var.varname;
conf.setFloat(var.varname, val);
}
public float getFloatVar(ConfVars var) {
return getFloatVar(this, var);
}
public void setFloatVar(ConfVars var, float val) {
setFloatVar(this, var, val);
}
public static boolean getBoolVar(Configuration conf, ConfVars var) {
assert (var.valClass == Boolean.class) : var.varname;
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, var.defaultBoolVal));
}
return conf.getBoolean(var.varname, var.defaultBoolVal);
}
public static boolean getBoolVar(Configuration conf, ConfVars var, boolean defaultVal) {
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, defaultVal));
}
return conf.getBoolean(var.varname, defaultVal);
}
public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
assert (var.valClass == Boolean.class) : var.varname;
conf.setBoolean(var.varname, val);
}
/* Dynamic partition pruning is enabled in some or all cases if either
* hive.spark.dynamic.partition.pruning is true or
* hive.spark.dynamic.partition.pruning.map.join.only is true
*/
public static boolean isSparkDPPAny(Configuration conf) {
return (conf.getBoolean(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING.varname,
ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING.defaultBoolVal) ||
conf.getBoolean(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY.varname,
ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY.defaultBoolVal));
}
public boolean getBoolVar(ConfVars var) {
return getBoolVar(this, var);
}
public void setBoolVar(ConfVars var, boolean val) {
setBoolVar(this, var, val);
}
public static String getVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultStrVal))
: conf.get(var.varname, var.defaultStrVal);
}
public static String getVarWithoutType(Configuration conf, ConfVars var) {
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultExpr))
: conf.get(var.varname, var.defaultExpr);
}
public static String getTrimmedVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
if (var.altName != null) {
return conf.getTrimmed(var.varname, conf.getTrimmed(var.altName, var.defaultStrVal));
}
return conf.getTrimmed(var.varname, var.defaultStrVal);
}
public static String[] getTrimmedStringsVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
String[] result = conf.getTrimmedStrings(var.varname, (String[])null);
if (result != null) {
return result;
}
if (var.altName != null) {
result = conf.getTrimmedStrings(var.altName, (String[])null);
if (result != null) {
return result;
}
}
return org.apache.hadoop.util.StringUtils.getTrimmedStrings(var.defaultStrVal);
}
public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
String ret = var.altName != null ? conf.get(var.varname, conf.get(var.altName, defaultVal))
: conf.get(var.varname, defaultVal);
return ret;
}
public static String getVar(Configuration conf, ConfVars var, EncoderDecoder<String, String> encoderDecoder) {
return encoderDecoder.decode(getVar(conf, var));
}
public String getLogIdVar(String defaultValue) {
String retval = getVar(ConfVars.HIVE_LOG_TRACE_ID);
if (StringUtils.EMPTY.equals(retval)) {
LOG.info("Using the default value passed in for log id: {}", defaultValue);
retval = defaultValue;
}
if (retval.length() > LOG_PREFIX_LENGTH) {
LOG.warn("The original log id prefix is {} has been truncated to {}", retval,
retval.substring(0, LOG_PREFIX_LENGTH - 1));
retval = retval.substring(0, LOG_PREFIX_LENGTH - 1);
}
return retval;
}
public static void setVar(Configuration conf, ConfVars var, String val) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, val);
}
public static void setVar(Configuration conf, ConfVars var, String val,
EncoderDecoder<String, String> encoderDecoder) {
setVar(conf, var, encoderDecoder.encode(val));
}
public static ConfVars getConfVars(String name) {
return vars.get(name);
}
public static ConfVars getMetaConf(String name) {
return metaConfs.get(name);
}
public String getVar(ConfVars var) {
return getVar(this, var);
}
public void setVar(ConfVars var, String val) {
setVar(this, var, val);
}
public String getQueryString() {
return getQueryString(this);
}
public static String getQueryString(Configuration conf) {
return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void setQueryString(String query) {
setQueryString(this, query);
}
public static void setQueryString(Configuration conf, String query) {
setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void logVars(PrintStream ps) {
for (ConfVars one : ConfVars.values()) {
ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : ""));
}
}
/**
* @return a ZooKeeperHiveHelper instance containing the ZooKeeper specifications from the
* given HiveConf.
*/
public ZooKeeperHiveHelper getZKConfig() {
return new ZooKeeperHiveHelper(getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM),
getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT),
getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE),
(int) getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT,
TimeUnit.MILLISECONDS),
(int) getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME,
TimeUnit.MILLISECONDS),
getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES));
}
public HiveConf() {
super();
initialize(this.getClass());
}
public HiveConf(Class<?> cls) {
super();
initialize(cls);
}
public HiveConf(Configuration other, Class<?> cls) {
super(other);
initialize(cls);
}
/**
* Copy constructor
*/
public HiveConf(HiveConf other) {
super(other);
hiveJar = other.hiveJar;
auxJars = other.auxJars;
isSparkConfigUpdated = other.isSparkConfigUpdated;
origProp = (Properties)other.origProp.clone();
restrictList.addAll(other.restrictList);
hiddenSet.addAll(other.hiddenSet);
modWhiteListPattern = other.modWhiteListPattern;
}
public Properties getAllProperties() {
return getProperties(this);
}
public static Properties getProperties(Configuration conf) {
Iterator<Map.Entry<String, String>> iter = conf.iterator();
Properties p = new Properties();
while (iter.hasNext()) {
Map.Entry<String, String> e = iter.next();
p.setProperty(e.getKey(), e.getValue());
}
return p;
}
private void initialize(Class<?> cls) {
hiveJar = (new JobConf(cls)).getJar();
// preserve the original configuration
origProp = getAllProperties();
// Overlay the ConfVars. Note that this ignores ConfVars with null values
addResource(getConfVarInputStream());
// Overlay hive-site.xml if it exists
if (hiveSiteURL != null) {
addResource(hiveSiteURL);
}
// if embedded metastore is to be used as per config so far
// then this is considered like the metastore server case
String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS);
// This is hackery, but having hive-common depend on standalone-metastore is really bad
// because it will pull all of the metastore code into every module. We need to check that
// we aren't using the standalone metastore. If we are, we should treat it the same as a
// remote metastore situation.
if (msUri == null || msUri.isEmpty()) {
msUri = this.get("metastore.thrift.uris");
}
LOG.debug("Found metastore URI of " + msUri);
if(HiveConfUtil.isEmbeddedMetaStore(msUri)){
setLoadMetastoreConfig(true);
}
// load hivemetastore-site.xml if this is metastore and file exists
if (isLoadMetastoreConfig() && hivemetastoreSiteUrl != null) {
addResource(hivemetastoreSiteUrl);
}
// load hiveserver2-site.xml if this is hiveserver2 and file exists
// metastore can be embedded within hiveserver2, in such cases
// the conf params in hiveserver2-site.xml will override whats defined
// in hivemetastore-site.xml
if (isLoadHiveServer2Config() && hiveServer2SiteUrl != null) {
addResource(hiveServer2SiteUrl);
}
String val = this.getVar(HiveConf.ConfVars.HIVE_ADDITIONAL_CONFIG_FILES);
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (val != null && !val.isEmpty()) {
String[] configFiles = val.split(",");
for (String config : configFiles) {
URL configURL = findConfigFile(classLoader, config, true);
if (configURL != null) {
addResource(configURL);
}
}
}
// Overlay the values of any system properties and manual overrides
applySystemProperties();
if ((this.get("hive.metastore.ds.retry.attempts") != null) ||
this.get("hive.metastore.ds.retry.interval") != null) {
LOG.warn("DEPRECATED: hive.metastore.ds.retry.* no longer has any effect. " +
"Use hive.hmshandler.retry.* instead");
}
// if the running class was loaded directly (through eclipse) rather than through a
// jar then this would be needed
if (hiveJar == null) {
hiveJar = this.get(ConfVars.HIVEJAR.varname);
}
if (auxJars == null) {
auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ',');
}
if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) {
setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false);
}
if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
List<String> trimmed = new ArrayList<String>();
for (Map.Entry<String,String> entry : this) {
String key = entry.getKey();
if (key == null || !key.startsWith("hive.")) {
continue;
}
ConfVars var = HiveConf.getConfVars(key);
if (var == null) {
var = HiveConf.getConfVars(key.trim());
if (var != null) {
trimmed.add(key);
}
}
if (var == null) {
LOG.warn("HiveConf of name {} does not exist", key);
} else if (!var.isType(entry.getValue())) {
LOG.warn("HiveConf {} expects {} type value", var.varname, var.typeString());
}
}
for (String key : trimmed) {
set(key.trim(), getRaw(key));
unset(key);
}
}
setupSQLStdAuthWhiteList();
// setup list of conf vars that are not allowed to change runtime
setupRestrictList();
hiddenSet.clear();
hiddenSet.addAll(HiveConfUtil.getHiddenSet(this));
setupRSCList();
}
/**
* If the config whitelist param for sql standard authorization is not set, set it up here.
*/
private void setupSQLStdAuthWhiteList() {
String whiteListParamsStr = getVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST);
if (whiteListParamsStr == null || whiteListParamsStr.trim().isEmpty()) {
// set the default configs in whitelist
whiteListParamsStr = getSQLStdAuthDefaultWhiteListPattern();
}
setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST, whiteListParamsStr);
}
private static String getSQLStdAuthDefaultWhiteListPattern() {
// create the default white list from list of safe config params
// and regex list
String confVarPatternStr = Joiner.on("|").join(convertVarsToRegex(sqlStdAuthSafeVarNames));
String regexPatternStr = Joiner.on("|").join(sqlStdAuthSafeVarNameRegexes);
return regexPatternStr + "|" + confVarPatternStr;
}
/**
* Obtains the local time-zone ID.
*/
public ZoneId getLocalTimeZone() {
String timeZoneStr = getVar(ConfVars.HIVE_LOCAL_TIME_ZONE);
return TimestampTZUtil.parseTimeZone(timeZoneStr);
}
/**
* @param paramList list of parameter strings
* @return list of parameter strings with "." replaced by "\."
*/
private static String[] convertVarsToRegex(String[] paramList) {
String[] regexes = new String[paramList.length];
for(int i=0; i<paramList.length; i++) {
regexes[i] = paramList[i].replace(".", "\\." );
}
return regexes;
}
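// Editor's illustration (assumed, based on the method above):
//   convertVarsToRegex(new String[] {"hive.exec.parallel"}) yields {"hive\\.exec\\.parallel"},
//   i.e. the regex hive\.exec\.parallel, so each safe parameter name matches itself literally
//   in the generated whitelist pattern.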
/**
* Default list of modifiable config parameters for sql standard authorization
* For internal use only.
*/
private static final String [] sqlStdAuthSafeVarNames = new String [] {
ConfVars.AGGR_JOIN_TRANSPOSE.varname,
ConfVars.BYTESPERREDUCER.varname,
ConfVars.CLIENT_STATS_COUNTERS.varname,
ConfVars.DEFAULTPARTITIONNAME.varname,
ConfVars.DROPIGNORESNONEXISTENT.varname,
ConfVars.HIVECOUNTERGROUP.varname,
ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
ConfVars.HIVEEXPREVALUATIONCACHE.varname,
ConfVars.HIVEQUERYRESULTFILEFORMAT.varname,
ConfVars.HIVEHASHTABLELOADFACTOR.varname,
ConfVars.HIVEHASHTABLETHRESHOLD.varname,
ConfVars.HIVEIGNOREMAPJOINHINT.varname,
ConfVars.HIVELIMITMAXROWSIZE.varname,
ConfVars.HIVEMAPREDMODE.varname,
ConfVars.HIVEMAPSIDEAGGREGATE.varname,
ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname,
ConfVars.HIVEROWOFFSET.varname,
ConfVars.HIVEVARIABLESUBSTITUTE.varname,
ConfVars.HIVEVARIABLESUBSTITUTEDEPTH.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL.varname,
ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname,
ConfVars.HIVE_COMPAT.varname,
ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname,
ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname,
ConfVars.HIVE_EXECUTION_ENGINE.varname,
ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname,
ConfVars.HIVE_EXIM_URI_SCHEME_WL.varname,
ConfVars.HIVE_FILE_MAX_FOOTER.varname,
ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS.varname,
ConfVars.HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS.varname,
ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES.varname,
ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED.varname,
ConfVars.HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS.varname,
ConfVars.HIVE_QUOTEDID_SUPPORT.varname,
ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES.varname,
ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS.varname,
ConfVars.HIVE_SCHEMA_EVOLUTION.varname,
ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname,
ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS.varname,
ConfVars.HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES.varname,
ConfVars.JOB_DEBUG_CAPTURE_STACKTRACES.varname,
ConfVars.JOB_DEBUG_TIMEOUT.varname,
ConfVars.LLAP_IO_ENABLED.varname,
ConfVars.LLAP_IO_USE_FILEID_PATH.varname,
ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname,
ConfVars.LLAP_EXECUTION_MODE.varname,
ConfVars.LLAP_AUTO_ALLOW_UBER.varname,
ConfVars.LLAP_AUTO_ENFORCE_TREE.varname,
ConfVars.LLAP_AUTO_ENFORCE_VECTORIZED.varname,
ConfVars.LLAP_AUTO_ENFORCE_STATS.varname,
ConfVars.LLAP_AUTO_MAX_INPUT.varname,
ConfVars.LLAP_AUTO_MAX_OUTPUT.varname,
ConfVars.LLAP_SKIP_COMPILE_UDF_CHECK.varname,
ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS.varname,
ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP.varname,
ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname,
ConfVars.MAXCREATEDFILES.varname,
ConfVars.MAXREDUCERS.varname,
ConfVars.NWAYJOINREORDER.varname,
ConfVars.OUTPUT_FILE_EXTENSION.varname,
ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname,
ConfVars.TASKLOG_DEBUG_TIMEOUT.varname,
ConfVars.HIVEQUERYID.varname,
ConfVars.HIVEQUERYTAG.varname,
};
/**
* Default list of regexes for config parameters that are modifiable with
* sql standard authorization enabled
*/
static final String [] sqlStdAuthSafeVarNameRegexes = new String [] {
"hive\\.auto\\..*",
"hive\\.cbo\\..*",
"hive\\.convert\\..*",
"hive\\.druid\\..*",
"hive\\.exec\\.dynamic\\.partition.*",
"hive\\.exec\\.max\\.dynamic\\.partitions.*",
"hive\\.exec\\.compress\\..*",
"hive\\.exec\\.infer\\..*",
"hive\\.exec\\.mode.local\\..*",
"hive\\.exec\\.orc\\..*",
"hive\\.exec\\.parallel.*",
"hive\\.explain\\..*",
"hive\\.fetch.task\\..*",
"hive\\.groupby\\..*",
"hive\\.hbase\\..*",
"hive\\.index\\..*",
"hive\\.index\\..*",
"hive\\.intermediate\\..*",
"hive\\.jdbc\\..*",
"hive\\.join\\..*",
"hive\\.limit\\..*",
"hive\\.log\\..*",
"hive\\.mapjoin\\..*",
"hive\\.merge\\..*",
"hive\\.optimize\\..*",
"hive\\.orc\\..*",
"hive\\.outerjoin\\..*",
"hive\\.parquet\\..*",
"hive\\.ppd\\..*",
"hive\\.prewarm\\..*",
"hive\\.server2\\.thrift\\.resultset\\.default\\.fetch\\.size",
"hive\\.server2\\.proxy\\.user",
"hive\\.skewjoin\\..*",
"hive\\.smbjoin\\..*",
"hive\\.stats\\..*",
"hive\\.strict\\..*",
"hive\\.tez\\..*",
"hive\\.vectorized\\..*",
"fs\\.defaultFS",
"ssl\\.client\\.truststore\\.location",
"distcp\\.atomic",
"distcp\\.ignore\\.failures",
"distcp\\.preserve\\.status",
"distcp\\.preserve\\.rawxattrs",
"distcp\\.sync\\.folders",
"distcp\\.delete\\.missing\\.source",
"distcp\\.keystore\\.resource",
"distcp\\.liststatus\\.threads",
"distcp\\.max\\.maps",
"distcp\\.copy\\.strategy",
"distcp\\.skip\\.crc",
"distcp\\.copy\\.overwrite",
"distcp\\.copy\\.append",
"distcp\\.map\\.bandwidth\\.mb",
"distcp\\.dynamic\\..*",
"distcp\\.meta\\.folder",
"distcp\\.copy\\.listing\\.class",
"distcp\\.filters\\.class",
"distcp\\.options\\.skipcrccheck",
"distcp\\.options\\.m",
"distcp\\.options\\.numListstatusThreads",
"distcp\\.options\\.mapredSslConf",
"distcp\\.options\\.bandwidth",
"distcp\\.options\\.overwrite",
"distcp\\.options\\.strategy",
"distcp\\.options\\.i",
"distcp\\.options\\.p.*",
"distcp\\.options\\.update",
"distcp\\.options\\.delete",
"mapred\\.map\\..*",
"mapred\\.reduce\\..*",
"mapred\\.output\\.compression\\.codec",
"mapred\\.job\\.queue\\.name",
"mapred\\.output\\.compression\\.type",
"mapred\\.min\\.split\\.size",
"mapreduce\\.job\\.reduce\\.slowstart\\.completedmaps",
"mapreduce\\.job\\.queuename",
"mapreduce\\.job\\.tags",
"mapreduce\\.input\\.fileinputformat\\.split\\.minsize",
"mapreduce\\.map\\..*",
"mapreduce\\.reduce\\..*",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.codec",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.type",
"oozie\\..*",
"tez\\.am\\..*",
"tez\\.task\\..*",
"tez\\.runtime\\..*",
"tez\\.queue\\.name",
};
//Take care of conf overrides.
//Includes values in ConfVars as well as underlying configuration properties (i.e., Hadoop)
public static final Map<String, String> overrides = new HashMap<String, String>();
/**
* Apply system properties to this object if the property name is defined in ConfVars
* and the value is non-null and not an empty string.
*/
private void applySystemProperties() {
Map<String, String> systemProperties = getConfSystemProperties();
for (Entry<String, String> systemProperty : systemProperties.entrySet()) {
this.set(systemProperty.getKey(), systemProperty.getValue());
}
}
/**
* This method returns a mapping from config variable name to its value for all config variables
* which have been set using System properties
*/
public static Map<String, String> getConfSystemProperties() {
Map<String, String> systemProperties = new HashMap<String, String>();
for (ConfVars oneVar : ConfVars.values()) {
if (System.getProperty(oneVar.varname) != null) {
if (System.getProperty(oneVar.varname).length() > 0) {
systemProperties.put(oneVar.varname, System.getProperty(oneVar.varname));
}
}
}
for (Map.Entry<String, String> oneVar : overrides.entrySet()) {
if (overrides.get(oneVar.getKey()) != null) {
if (overrides.get(oneVar.getKey()).length() > 0) {
systemProperties.put(oneVar.getKey(), oneVar.getValue());
}
}
}
return systemProperties;
}
/**
* Overlays ConfVar properties with non-null values
*/
private static void applyDefaultNonNullConfVars(Configuration conf) {
for (ConfVars var : ConfVars.values()) {
String defaultValue = var.getDefaultValue();
if (defaultValue == null) {
// Don't override ConfVars with null values
continue;
}
conf.set(var.varname, defaultValue);
}
}
public Properties getChangedProperties() {
Properties ret = new Properties();
Properties newProp = getAllProperties();
for (Object one : newProp.keySet()) {
String oneProp = (String) one;
String oldValue = origProp.getProperty(oneProp);
if (!StringUtils.equals(oldValue, newProp.getProperty(oneProp))) {
ret.setProperty(oneProp, newProp.getProperty(oneProp));
}
}
return (ret);
}
public String getJar() {
return hiveJar;
}
/**
* @return the auxJars
*/
public String getAuxJars() {
return auxJars;
}
/**
* Set the auxiliary jars. Used for unit tests only.
* @param auxJars the auxJars to set.
*/
public void setAuxJars(String auxJars) {
this.auxJars = auxJars;
setVar(this, ConfVars.HIVEAUXJARS, auxJars);
}
public URL getHiveDefaultLocation() {
return hiveDefaultURL;
}
public static void setHiveSiteLocation(URL location) {
hiveSiteURL = location;
}
public static void setHivemetastoreSiteUrl(URL location) {
hivemetastoreSiteUrl = location;
}
public static URL getHiveSiteLocation() {
return hiveSiteURL;
}
public static URL getMetastoreSiteLocation() {
return hivemetastoreSiteUrl;
}
public static URL getHiveServer2SiteLocation() {
return hiveServer2SiteUrl;
}
/**
* @return the user name set in hadoop.job.ugi param or the current user from System
* @throws IOException
*/
public String getUser() throws IOException {
try {
UserGroupInformation ugi = Utils.getUGI();
return ugi.getUserName();
} catch (LoginException le) {
throw new IOException(le);
}
}
public static String getColumnInternalName(int pos) {
return "_col" + pos;
}
public static int getPositionFromInternalName(String internalName) {
Pattern internalPattern = Pattern.compile("_col([0-9]+)");
Matcher m = internalPattern.matcher(internalName);
if (!m.matches()){
return -1;
} else {
return Integer.parseInt(m.group(1));
}
}
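// Editor's illustration (assumed from the two helpers above):
//   getColumnInternalName(3)              -> "_col3"
//   getPositionFromInternalName("_col3")  -> 3
//   getPositionFromInternalName("name")   -> -1 (not an internal column name)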
/**
* Append comma separated list of config vars to the restrict List
* @param restrictListStr
*/
public void addToRestrictList(String restrictListStr) {
if (restrictListStr == null) {
return;
}
String oldList = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
if (oldList == null || oldList.isEmpty()) {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, restrictListStr);
} else {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, oldList + "," + restrictListStr);
}
setupRestrictList();
}
/**
* Set white list of parameters that are allowed to be modified
*
* @param paramNameRegex
*/
@LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" })
public void setModifiableWhiteListRegex(String paramNameRegex) {
if (paramNameRegex == null) {
return;
}
modWhiteListPattern = Pattern.compile(paramNameRegex);
}
/**
* Add the HIVE_CONF_RESTRICTED_LIST values to restrictList,
* including HIVE_CONF_RESTRICTED_LIST itself
*/
private void setupRestrictList() {
String restrictListStr = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
restrictList.clear();
if (restrictListStr != null) {
for (String entry : restrictListStr.split(",")) {
restrictList.add(entry.trim());
}
}
String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST);
if (internalVariableListStr != null) {
for (String entry : internalVariableListStr.split(",")) {
restrictList.add(entry.trim());
}
}
restrictList.add(ConfVars.HIVE_IN_TEST.varname);
restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname);
restrictList.add(ConfVars.HIVE_SPARK_RSC_CONF_LIST.varname);
}
private void setupRSCList() {
rscList.clear();
String vars = this.getVar(ConfVars.HIVE_SPARK_RSC_CONF_LIST);
if (vars != null) {
for (String var : vars.split(",")) {
rscList.add(var.trim());
}
}
}
/**
* Strips hidden config entries from configuration
*/
public void stripHiddenConfigurations(Configuration conf) {
HiveConfUtil.stripConfigurations(conf, hiddenSet);
}
/**
* @return true if HS2 webui is enabled
*/
public boolean isWebUiEnabled() {
return this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT) != 0;
}
/**
* @return true if HS2 webui query-info cache is enabled
*/
public boolean isWebUiQueryInfoCacheEnabled() {
return isWebUiEnabled() && this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES) > 0;
}
/* Dynamic partition pruning is enabled in some or all cases
*/
public boolean isSparkDPPAny() {
return isSparkDPPAny(this);
}
/* Dynamic partition pruning is enabled only for map join
* hive.spark.dynamic.partition.pruning is false and
* hive.spark.dynamic.partition.pruning.map.join.only is true
*/
public boolean isSparkDPPOnlyMapjoin() {
return (!this.getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING) &&
this.getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY));
}
public static boolean isLoadMetastoreConfig() {
return loadMetastoreConfig;
}
public static void setLoadMetastoreConfig(boolean loadMetastoreConfig) {
HiveConf.loadMetastoreConfig = loadMetastoreConfig;
}
public static boolean isLoadHiveServer2Config() {
return loadHiveServer2Config;
}
public static void setLoadHiveServer2Config(boolean loadHiveServer2Config) {
HiveConf.loadHiveServer2Config = loadHiveServer2Config;
}
public static class StrictChecks {
private static final String NO_LIMIT_MSG = makeMessage(
"Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT);
public static final String NO_PARTITIONLESS_MSG = makeMessage(
"Queries against partitioned tables without a partition filter",
ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER);
private static final String NO_COMPARES_MSG = makeMessage(
"Unsafe compares between different types", ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
private static final String NO_CARTESIAN_MSG = makeMessage(
"Cartesian products", ConfVars.HIVE_STRICT_CHECKS_CARTESIAN);
private static final String NO_BUCKETING_MSG = makeMessage(
"Load into bucketed tables", ConfVars.HIVE_STRICT_CHECKS_BUCKETING);
private static String makeMessage(String what, ConfVars setting) {
return what + " are disabled for safety reasons. If you know what you are doing, please set "
+ setting.varname + " to false and make sure that " + ConfVars.HIVEMAPREDMODE.varname +
" is not set to 'strict' to proceed. Note that you may get errors or incorrect " +
"results if you make a mistake while using some of the unsafe features.";
}
public static String checkNoLimit(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT) ? null : NO_LIMIT_MSG;
}
public static String checkNoPartitionFilter(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER)
? null : NO_PARTITIONLESS_MSG;
}
public static String checkTypeSafety(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY) ? null : NO_COMPARES_MSG;
}
public static String checkCartesian(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) ? null : NO_CARTESIAN_MSG;
}
public static String checkBucketing(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_BUCKETING) ? null : NO_BUCKETING_MSG;
}
private static boolean isAllowed(Configuration conf, ConfVars setting) {
String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null);
return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting);
}
}
public static String getNonMrEngines() {
String result = StringUtils.EMPTY;
for (String s : ConfVars.HIVE_EXECUTION_ENGINE.getValidStringValues()) {
if ("mr".equals(s)) {
continue;
}
if (!result.isEmpty()) {
result += ", ";
}
result += s;
}
return result;
}
public static String generateMrDeprecationWarning() {
return "Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. "
+ "Consider using a different execution engine (i.e. " + HiveConf.getNonMrEngines()
+ ") or using Hive 1.X releases.";
}
private static final Object reverseMapLock = new Object();
private static HashMap<String, ConfVars> reverseMap = null;
public static HashMap<String, ConfVars> getOrCreateReverseMap() {
// This should be called rarely enough; for now it's ok to just lock every time.
synchronized (reverseMapLock) {
if (reverseMap != null) {
return reverseMap;
}
}
HashMap<String, ConfVars> vars = new HashMap<>();
for (ConfVars val : ConfVars.values()) {
vars.put(val.varname.toLowerCase(), val);
if (val.altName != null && !val.altName.isEmpty()) {
vars.put(val.altName.toLowerCase(), val);
}
}
synchronized (reverseMapLock) {
if (reverseMap != null) {
return reverseMap;
}
reverseMap = vars;
return reverseMap;
}
}
public void verifyAndSetAll(Map<String, String> overlay) {
for (Entry<String, String> entry : overlay.entrySet()) {
verifyAndSet(entry.getKey(), entry.getValue());
}
}
public Map<String, String> subtree(String string) {
Map<String, String> ret = new HashMap<>();
for (Entry<Object, Object> entry : getProps().entrySet()) {
String key = (String) entry.getKey();
String value = (String) entry.getValue();
if (key.startsWith(string)) {
ret.put(key.substring(string.length() + 1), value);
}
}
return ret;
}
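// Editor's illustration (assumed): with a property "reexec.overlay.hive.foo" = "bar",
// subtree("reexec.overlay") returns a map containing "hive.foo" -> "bar", i.e. the prefix
// plus the following dot is stripped from each matching key.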
}
|
[
"\"HIVE_CONF_DIR\"",
"\"HIVE_HOME\"",
"\"HADOOP_HOME\"",
"\"HADOOP_PREFIX\""
] |
[] |
[
"HADOOP_PREFIX",
"HADOOP_HOME",
"HIVE_CONF_DIR",
"HIVE_HOME"
] |
[]
|
["HADOOP_PREFIX", "HADOOP_HOME", "HIVE_CONF_DIR", "HIVE_HOME"]
|
java
| 4 | 0 | |
pkg/oc/clusterup/up.go
|
package clusterup
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/api/types/versions"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/rest"
kclientcmd "k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
aggregatorinstall "k8s.io/kube-aggregator/pkg/apis/apiregistration/install"
"k8s.io/kubernetes/pkg/api/legacyscheme"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
userv1client "github.com/openshift/client-go/user/clientset/versioned"
cmdutil "github.com/openshift/origin/pkg/cmd/util"
"github.com/openshift/origin/pkg/cmd/util/variable"
oauthclientinternal "github.com/openshift/origin/pkg/oauth/generated/internalclientset"
"github.com/openshift/origin/pkg/oc/clusteradd/components/registry"
"github.com/openshift/origin/pkg/oc/clusteradd/components/service-catalog"
"github.com/openshift/origin/pkg/oc/clusterup/coreinstall/components"
"github.com/openshift/origin/pkg/oc/clusterup/coreinstall/kubeapiserver"
"github.com/openshift/origin/pkg/oc/clusterup/docker/dockerhelper"
"github.com/openshift/origin/pkg/oc/clusterup/docker/errors"
"github.com/openshift/origin/pkg/oc/clusterup/docker/host"
"github.com/openshift/origin/pkg/oc/clusterup/docker/openshift"
"github.com/openshift/origin/pkg/version"
)
const (
// CmdUpRecommendedName is the recommended command name
CmdUpRecommendedName = "up"
initialUser = "developer"
initialPassword = "developer"
initialProjectName = "myproject"
initialProjectDisplay = "My Project"
initialProjectDesc = "Initial developer project"
defaultRedirectClient = "openshift-web-console"
developmentRedirectURI = "https://localhost:9000"
dockerAPIVersion122 = "1.22"
)
var (
cmdUpLong = templates.LongDesc(`
Starts an OpenShift cluster using Docker containers, provisioning a registry, router,
initial templates, and a default project.
This command will attempt to use an existing connection to a Docker daemon. Before running
the command, ensure that you can execute docker commands successfully (e.g. 'docker ps').
By default, the OpenShift cluster will be set up to use a routing suffix that ends in nip.io.
This is to allow dynamic host names to be created for routes. An alternate routing suffix
can be specified using the --routing-suffix flag.
A public hostname can also be specified for the server with the --public-hostname flag.`)
cmdUpExample = templates.Examples(`
# Start OpenShift using a specific public host name
%[1]s --public-hostname=my.address.example.com`)
// defaultImageStreams is the default key for the above imageStreams mapping.
// It should be set during build via -ldflags.
defaultImageStreams string
)
type ClusterUpConfig struct {
ImageTemplate variable.ImageTemplate
ImageTag string
DockerMachine string
SkipRegistryCheck bool
PortForwarding bool
ClusterAdd *cobra.Command
UserEnabledComponents []string
KubeOnly bool
// BaseTempDir is the directory to use as the root for temp directories
// This allows us to bundle all of the cluster-up directories in one spot for easier cleanup and ensures we aren't
// doing crazy things like dirtying /var on the host (that does weird stuff)
BaseDir string
SpecifiedBaseDir bool
HostName string
UseExistingConfig bool
ServerLogLevel int
ComponentsToEnable *components.Components
HostVolumesDir string
HostConfigDir string
WriteConfig bool
HostDataDir string
UsePorts []int
DNSPort int
ServerIP string
AdditionalIPs []string
UseNsenterMount bool
PublicHostname string
RoutingSuffix string
HostPersistentVolumesDir string
HTTPProxy string
HTTPSProxy string
NoProxy []string
dockerClient dockerhelper.Interface
dockerHelper *dockerhelper.Helper
hostHelper *host.HostHelper
openshiftHelper *openshift.Helper
command *cobra.Command
defaultClientConfig clientcmdapi.Config
isRemoteDocker bool
usingDefaultImages bool
usingDefaultOpenShiftImage bool
pullPolicy string
createdUser bool
genericclioptions.IOStreams
}
func NewClusterUpConfig(streams genericclioptions.IOStreams, clusterAdd *cobra.Command) *ClusterUpConfig {
return &ClusterUpConfig{
UserEnabledComponents: []string{"*"},
UsePorts: openshift.BasePorts,
PortForwarding: defaultPortForwarding(),
DNSPort: openshift.DefaultDNSPort,
ImageTemplate: variable.NewDefaultImageTemplate(),
// We pass cluster add as a command to prevent anyone from ever cheating with their wiring. You either work from flags
// or you don't work. You cannot add glue of any sort.
ClusterAdd: clusterAdd,
IOStreams: streams,
}
}
// NewCmdUp creates a command that starts OpenShift on Docker with reasonable defaults
func NewCmdUp(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams, clusterAdd *cobra.Command) *cobra.Command {
config := NewClusterUpConfig(streams, clusterAdd)
cmd := &cobra.Command{
Use: name,
Short: "Start OpenShift on Docker with reasonable defaults",
Long: cmdUpLong,
Example: fmt.Sprintf(cmdUpExample, fullName),
Run: func(c *cobra.Command, args []string) {
kcmdutil.CheckErr(config.Complete(f, c))
kcmdutil.CheckErr(config.Validate())
kcmdutil.CheckErr(config.Check())
if err := config.Start(); err != nil {
PrintError(err, streams.ErrOut)
os.Exit(1)
}
},
}
config.Bind(cmd.Flags())
return cmd
}
func (c *ClusterUpConfig) Bind(flags *pflag.FlagSet) {
flags.StringVar(&c.ImageTag, "tag", "", "Specify an explicit version for OpenShift images")
flags.MarkHidden("tag")
flags.StringVar(&c.ImageTemplate.Format, "image", c.ImageTemplate.Format, "Specify the images to use for OpenShift")
flags.BoolVar(&c.SkipRegistryCheck, "skip-registry-check", false, "Skip Docker daemon registry check")
flags.StringVar(&c.PublicHostname, "public-hostname", "", "Public hostname for OpenShift cluster")
flags.StringVar(&c.RoutingSuffix, "routing-suffix", "", "Default suffix for server routes")
flags.StringVar(&c.BaseDir, "base-dir", c.BaseDir, "Directory on Docker host for cluster up configuration")
flags.BoolVar(&c.WriteConfig, "write-config", false, "Write the configuration files into host config dir")
flags.BoolVar(&c.PortForwarding, "forward-ports", c.PortForwarding, "Use Docker port-forwarding to communicate with origin container. Requires 'socat' locally.")
flags.IntVar(&c.ServerLogLevel, "server-loglevel", 0, "Log level for OpenShift server")
flags.StringSliceVar(&c.UserEnabledComponents, "enable", c.UserEnabledComponents, fmt.Sprintf(""+
"A list of components to enable. '*' enables all on-by-default components, 'foo' enables the component "+
"named 'foo', '-foo' disables the component named 'foo'.\nAll components: %s\nDisabled-by-default components: %s",
strings.Join(knownComponents.List(), ", "), strings.Join(componentsDisabledByDefault.List(), ", ")))
flags.BoolVar(&c.KubeOnly, "kube-only", c.KubeOnly, "Only install Kubernetes, no OpenShift apiserver or controllers. Alpha, for development only. Can result in an unstable cluster.")
flags.MarkHidden("kube-only")
flags.StringVar(&c.HTTPProxy, "http-proxy", "", "HTTP proxy to use for master and builds")
flags.StringVar(&c.HTTPSProxy, "https-proxy", "", "HTTPS proxy to use for master and builds")
flags.StringArrayVar(&c.NoProxy, "no-proxy", c.NoProxy, "List of hosts or subnets for which a proxy should not be used")
}
var (
knownComponents = sets.NewString(
"centos-imagestreams",
"registry",
"rhel-imagestreams",
"router",
"sample-templates",
"persistent-volumes",
"automation-service-broker",
"service-catalog",
"template-service-broker",
"web-console",
)
componentsDisabledByDefault = sets.NewString(
"automation-service-broker",
"service-catalog",
"template-service-broker")
)
func init() {
switch defaultImageStreams {
case "centos7":
componentsDisabledByDefault.Insert("rhel-imagestreams")
case "rhel7":
componentsDisabledByDefault.Insert("centos-imagestreams")
}
}
func (c *ClusterUpConfig) Complete(f genericclioptions.RESTClientGetter, cmd *cobra.Command) error {
// TODO: remove this when we move to container/apply based component installation
aggregatorinstall.Install(legacyscheme.Scheme)
// Set the ImagePullPolicy field in static pods and components based on whether users specified
// the --tag flag or not.
c.pullPolicy = "Always"
if len(c.ImageTag) > 0 {
c.pullPolicy = "IfNotPresent"
}
glog.V(5).Infof("Using %q as default image pull policy", c.pullPolicy)
// Get the default client config for login
var err error
c.defaultClientConfig, err = f.ToRawKubeConfigLoader().RawConfig()
if err != nil {
if !os.IsNotExist(err) {
return err
}
c.defaultClientConfig = *clientcmdapi.NewConfig()
}
c.command = cmd
c.isRemoteDocker = len(os.Getenv("DOCKER_HOST")) > 0
c.ImageTemplate.Format = variable.Expand(c.ImageTemplate.Format, func(s string) (string, bool) {
if s == "version" {
if len(c.ImageTag) == 0 {
return strings.TrimRight("v"+version.Get().Major+"."+version.Get().Minor, "+"), true
}
return c.ImageTag, true
}
return "", false
}, variable.Identity)
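// Editor's note (illustrative, not upstream): the expansion above means that a "version"
// placeholder in the image format resolves to the --tag value when one was given, and
// otherwise to "v<major>.<minor>" derived from version.Get() (with any trailing "+" trimmed).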
if len(c.BaseDir) == 0 {
c.SpecifiedBaseDir = false
c.BaseDir = "openshift.local.clusterup"
}
if !path.IsAbs(c.BaseDir) {
cwd, err := os.Getwd()
if err != nil {
return err
}
absHostDir, err := cmdutil.MakeAbs(c.BaseDir, cwd)
if err != nil {
return err
}
c.BaseDir = absHostDir
}
if _, err := os.Stat(c.BaseDir); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(c.BaseDir, os.ModePerm); err != nil {
return fmt.Errorf("unable to create base directory %q: %v", c.BaseDir, err)
}
}
// When users run the cluster up for the first time we store the list of components the cluster up runs with
// into a "components.json" file inside base directory.
// On the second run, we won't allow users to specify new or delete existing components when the base directory
// was already initialized.
if _, err := os.Stat(filepath.Join(c.BaseDir, "components.json")); err == nil {
// If the user tries to specify --enable on a non-empty cluster up base dir, return an error
if !sets.NewString(c.UserEnabledComponents...).Equal(sets.NewString("*")) {
if _, err := os.Stat(filepath.Join(c.BaseDir, "components.json")); err == nil {
return fmt.Errorf("cannot use --enable when the cluster is already initialized, use cluster add instead")
}
}
componentFile, err := os.Open(filepath.Join(c.BaseDir, "components.json"))
if err != nil {
return fmt.Errorf("unable to read components.json file: %v", err)
}
defer componentFile.Close()
c.ComponentsToEnable, err = components.ReadComponentsEnabled(componentFile)
if err != nil {
return fmt.Errorf("unable to parse components.json file: %v", err)
}
} else {
// This is initial cluster up run on empty base dir
c.ComponentsToEnable = components.NewComponentsEnabled()
for _, currComponent := range knownComponents.UnsortedList() {
if isComponentEnabled(currComponent, componentsDisabledByDefault, c.UserEnabledComponents...) {
c.ComponentsToEnable.Add(currComponent)
}
}
// Store enabled components into file so on the second run we know what to enable
componentFile, err := os.Create(filepath.Join(c.BaseDir, "components.json"))
if err != nil {
return fmt.Errorf("unable to write components.json file: %v", err)
}
defer componentFile.Close()
components.WriteComponentsEnabled(componentFile, c.ComponentsToEnable)
}
// Get a Docker client.
// If a Docker machine was specified, make sure that the machine is running.
// Otherwise, use environment variables.
c.printProgress("Getting a Docker client")
client, err := dockerhelper.GetDockerClient()
if err != nil {
return err
}
c.dockerClient = client
// Ensure that the OpenShift Docker image is available.
// If not present, pull it.
// We do this here because the image is used in the next step if running Red Hat docker.
c.printProgress(fmt.Sprintf("Checking if image %s is available", c.openshiftImage()))
if err := c.checkOpenShiftImage(); err != nil {
return err
}
// Check whether the Docker host has the right binaries to use Kubernetes' nsenter mounter
// If not, use a shared volume to mount volumes on OpenShift
if isRedHatDocker, err := c.DockerHelper().IsRedHat(); err == nil && isRedHatDocker {
c.printProgress("Checking type of volume mount")
c.UseNsenterMount, err = c.HostHelper().CanUseNsenterMounter()
if err != nil {
return err
}
}
if err := os.MkdirAll(c.BaseDir, 0755); err != nil {
return err
}
if c.UseNsenterMount {
// This is the default path when you run cluster up locally, with a local docker daemon
c.HostVolumesDir = path.Join(c.BaseDir, "openshift.local.volumes")
if err := os.MkdirAll(c.HostVolumesDir, 0755); err != nil {
return err
}
} else {
// Snowflake for OSX Docker for Mac
c.HostVolumesDir = c.RemoteDirFor("openshift.local.volumes")
}
c.HostPersistentVolumesDir = path.Join(c.BaseDir, "openshift.local.pv")
if err := os.MkdirAll(c.HostPersistentVolumesDir, 0755); err != nil {
return err
}
c.HostDataDir = path.Join(c.BaseDir, "etcd")
if err := os.MkdirAll(c.HostDataDir, 0755); err != nil {
return err
}
// Ensure that host directories exist.
// If not using the nsenter mounter, create a volume share on the host machine to
// mount OpenShift volumes.
if !c.UseNsenterMount {
c.printProgress("Creating shared mount directory on the remote host")
if err := c.HostHelper().EnsureVolumeUseShareMount(c.HostVolumesDir); err != nil {
return err
}
}
// Determine an IP to use for OpenShift.
// The result is that c.ServerIP will be populated with
// the IP that will be used on the client configuration file.
// The c.ServerIP will be set to a specific IP when:
// 1 - DOCKER_HOST is populated with a particular tcp:// type of address
// 2 - a docker-machine has been specified
// 3 - 127.0.0.1 is not working and an alternate IP has been found
// Otherwise, the default c.ServerIP will be 127.0.0.1 which is what
// will get stored in the client's config file. The reason for this is that
// the client config will not depend on the machine's current IP address which
// could change over time.
//
// c.AdditionalIPs will be populated with additional IPs that should be
// included in the server's certificate. These include any IPs that are currently
// assigned to the Docker host (hostname -I)
// Each IP is tested to ensure that it can be accessed from the current client
c.printProgress("Determining server IP")
c.ServerIP, c.AdditionalIPs, err = c.determineServerIP()
if err != nil {
return err
}
glog.V(3).Infof("Using %q as primary server IP and %q as additional IPs", c.ServerIP, strings.Join(c.AdditionalIPs, ","))
if len(c.RoutingSuffix) == 0 {
c.RoutingSuffix = c.ServerIP + ".nip.io"
}
// this used to be done in the openshift start method, but its mutating state.
if len(c.HTTPProxy) > 0 || len(c.HTTPSProxy) > 0 {
c.updateNoProxy()
}
return nil
}
// Validate validates that required fields in StartConfig have been populated
func (c *ClusterUpConfig) Validate() error {
if c.dockerClient == nil {
return fmt.Errorf("missing dockerClient")
}
return nil
}
func (c *ClusterUpConfig) printProgress(msg string) {
fmt.Fprintf(c.Out, msg+" ...\n")
}
// Check is a spot to do NON-MUTATING, preflight checks. Over time, we should try to move our non-mutating checks out of
// Complete and into Check.
func (c *ClusterUpConfig) Check() error {
// Check for an OpenShift container. If one exists and is running, exit.
// If one exists but not running, delete it.
c.printProgress("Checking if OpenShift is already running")
if err := checkExistingOpenShiftContainer(c.DockerHelper()); err != nil {
return err
}
// Docker checks
c.printProgress(fmt.Sprintf("Checking for supported Docker version (=>%s)", dockerAPIVersion122))
ver, err := c.DockerHelper().APIVersion()
if err != nil {
return err
}
if versions.LessThan(ver.APIVersion, dockerAPIVersion122) {
return fmt.Errorf("unsupported Docker version %s, need at least %s", ver.APIVersion, dockerAPIVersion122)
}
if !c.SkipRegistryCheck {
c.printProgress("Checking if insecured registry is configured properly in Docker")
if err := c.checkDockerInsecureRegistry(); err != nil {
return err
}
}
// Networking checks
if c.PortForwarding {
c.printProgress("Checking prerequisites for port forwarding")
if err := checkPortForwardingPrerequisites(); err != nil {
return err
}
if err := openshift.CheckSocat(); err != nil {
return err
}
}
c.printProgress("Checking if required ports are available")
if err := c.checkAvailablePorts(); err != nil {
return err
}
// OpenShift checks
c.printProgress("Checking if OpenShift client is configured properly")
if err := c.checkOpenShiftClient(); err != nil {
return err
}
// Ensure that the OpenShift Docker image is available.
// If not present, pull it.
c.printProgress(fmt.Sprintf("Checking if image %s is available", c.openshiftImage()))
if err := c.checkOpenShiftImage(); err != nil {
return err
}
return nil
}
// Start runs the start tasks ensuring that they are executed in sequence
func (c *ClusterUpConfig) Start() error {
fmt.Fprintf(c.Out, "Starting OpenShift using %s ...\n", c.openshiftImage())
if c.PortForwarding {
if err := c.OpenShiftHelper().StartSocatTunnel(c.ServerIP); err != nil {
return err
}
}
if err := c.StartSelfHosted(c.Out); err != nil {
return err
}
if c.WriteConfig {
return nil
}
if err := c.PostClusterStartupMutations(c.Out); err != nil {
return err
}
// if we're only supposed to install kube, only install kube. Maybe later we'll add back components.
if c.KubeOnly {
c.printProgress("Server Information")
c.serverInfo(c.Out)
return nil
}
// Add default redirect URIs to an OAuthClient to enable local web-console development.
c.printProgress("Adding default OAuthClient redirect URIs")
if err := c.ensureDefaultRedirectURIs(c.Out); err != nil {
return err
}
if len(c.ComponentsToEnable.Enabled) > 0 {
args := append([]string{}, "--image="+c.ImageTemplate.Format)
args = append(args, "--base-dir="+c.BaseDir)
if len(c.ImageTag) > 0 {
args = append(args, "--tag="+c.ImageTag)
}
args = append(args, c.ComponentsToEnable.Enabled...)
if err := c.ClusterAdd.ParseFlags(args); err != nil {
return err
}
glog.V(2).Infof("oc cluster add %v", args)
if err := c.ClusterAdd.RunE(c.ClusterAdd, args); err != nil {
return err
}
}
if c.ShouldCreateUser() {
// Login with an initial default user
c.printProgress("Login to server")
if err := c.login(c.IOStreams); err != nil {
return err
}
c.createdUser = true
// Create an initial project
c.printProgress(fmt.Sprintf("Creating initial project %q", initialProjectName))
if err := c.createProject(c.Out); err != nil {
return err
}
}
c.printProgress("Server Information")
c.serverInfo(c.Out)
return nil
}
func defaultPortForwarding() bool {
// Defaults to true if running on Mac, with no DOCKER_HOST defined
return runtime.GOOS == "darwin" && len(os.Getenv("DOCKER_HOST")) == 0
}
// checkOpenShiftClient ensures that the client can be configured
// for the new server
func (c *ClusterUpConfig) checkOpenShiftClient() error {
kubeConfig := os.Getenv("KUBECONFIG")
if len(kubeConfig) == 0 {
return nil
}
// if you're trying to put the kubeconfig in a subdirectory of the basedir, you're probably using a KUBECONFIG
// location that is going to overwrite a "real" kubeconfig, usually admin.kubeconfig, which will break every other component
// relying on it being a full-power kubeconfig
kubeConfigDir := filepath.Dir(kubeConfig)
cwd, err := os.Getwd()
if err != nil {
return err
}
absKubeConfigDir, err := cmdutil.MakeAbs(kubeConfigDir, cwd)
if err != nil {
return err
}
if strings.HasPrefix(absKubeConfigDir, c.BaseDir+"/") {
return fmt.Errorf("cannot choose kubeconfig in subdirectory of the --base-dir: %q", kubeConfig)
}
var (
kubeConfigError error
f *os.File
)
_, err = os.Stat(kubeConfig)
switch {
case os.IsNotExist(err):
err = os.MkdirAll(filepath.Dir(kubeConfig), 0755)
if err != nil {
kubeConfigError = fmt.Errorf("cannot make directory: %v", err)
break
}
f, err = os.Create(kubeConfig)
if err != nil {
kubeConfigError = fmt.Errorf("cannot create file: %v", err)
break
}
f.Close()
case err == nil:
f, err = os.OpenFile(kubeConfig, os.O_RDWR, 0644)
if err != nil {
kubeConfigError = fmt.Errorf("cannot open %s for write: %v", kubeConfig, err)
break
}
f.Close()
default:
kubeConfigError = fmt.Errorf("cannot access %s: %v", kubeConfig, err)
}
if kubeConfigError != nil {
return errors.ErrKubeConfigNotWriteable(kubeConfig, kubeConfigError)
}
return nil
}
// GetDockerClient obtains a new Docker client from the environment or
// from a Docker machine, starting it if necessary
func (c *ClusterUpConfig) GetDockerClient() dockerhelper.Interface {
return c.dockerClient
}
// checkExistingOpenShiftContainer checks the state of an OpenShift container.
// If one is already running, it throws an error.
// If one exists, it removes it so a new one can be created.
func checkExistingOpenShiftContainer(dockerHelper *dockerhelper.Helper) error {
container, running, err := dockerHelper.GetContainerState(openshift.ContainerName)
if err != nil {
return errors.NewError("unexpected error while checking OpenShift container state").WithCause(err)
}
if running {
return errors.NewError("OpenShift is already running").WithSolution("To start OpenShift again, stop the current cluster:\n$ %s\n", "oc cluster down")
}
if container != nil {
err = dockerHelper.RemoveContainer(openshift.ContainerName)
if err != nil {
return errors.NewError("cannot delete existing OpenShift container").WithCause(err)
}
glog.V(2).Info("Deleted existing OpenShift container")
}
return nil
}
// checkOpenShiftImage checks whether the OpenShift image exists.
// If not it tells the Docker daemon to pull it.
func (c *ClusterUpConfig) checkOpenShiftImage() error {
if err := c.DockerHelper().CheckAndPull(c.openshiftImage(), c.Out); err != nil {
return err
}
if err := c.DockerHelper().CheckAndPull(c.cliImage(), c.Out); err != nil {
return err
}
if err := c.DockerHelper().CheckAndPull(c.nodeImage(), c.Out); err != nil {
return err
}
return nil
}
// checkDockerInsecureRegistry checks to see if the Docker daemon has an appropriate insecure registry argument set so that our services can access the registry
func (c *ClusterUpConfig) checkDockerInsecureRegistry() error {
configured, hasEntries, err := c.DockerHelper().InsecureRegistryIsConfigured(openshift.DefaultSvcCIDR)
if err != nil {
return err
}
if !configured {
if hasEntries {
return errors.ErrInvalidInsecureRegistryArgument()
}
return errors.ErrNoInsecureRegistryArgument()
}
return nil
}
// checkPortForwardingPrerequisites checks that socat is installed when port forwarding is enabled
// Socat needs to be installed manually on MacOS
func checkPortForwardingPrerequisites() error {
commandOut, err := exec.Command("socat", "-V").CombinedOutput()
if err != nil {
glog.V(2).Infof("Error from socat command execution: %v\n%s", err, string(commandOut))
glog.Warning("Port forwarding requires socat command line utility." +
"Cluster public ip may not be reachable. Please make sure socat installed in your operating system.")
}
return nil
}
// ensureDefaultRedirectURIs merges a default URL to an auth client's RedirectURIs array
func (c *ClusterUpConfig) ensureDefaultRedirectURIs(out io.Writer) error {
restConfig, err := c.RESTConfig()
if err != nil {
return err
}
oauthClient, err := oauthclientinternal.NewForConfig(restConfig)
if err != nil {
return err
}
webConsoleOAuth, err := oauthClient.Oauth().OAuthClients().Get(defaultRedirectClient, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
fmt.Fprintf(out, "Unable to find OAuthClient %q\n", defaultRedirectClient)
return nil
}
// announce fetch error without interrupting remaining tasks
suggestedCmd := fmt.Sprintf("oc patch %s/%s -p '{%q:[%q]}'", "oauthclient", defaultRedirectClient, "redirectURIs", developmentRedirectURI)
errMsg := fmt.Sprintf("Unable to fetch OAuthClient %q.\nTo manually add a development redirect URI, run %q\n", defaultRedirectClient, suggestedCmd)
fmt.Fprintf(out, "%s\n", errMsg)
return nil
}
// ensure the default redirect URI is not already present
redirects := sets.NewString(webConsoleOAuth.RedirectURIs...)
if redirects.Has(developmentRedirectURI) {
return nil
}
webConsoleOAuth.RedirectURIs = append(webConsoleOAuth.RedirectURIs, developmentRedirectURI)
_, err = oauthClient.Oauth().OAuthClients().Update(webConsoleOAuth)
if err != nil {
// announce error without interrupting remaining tasks
suggestedCmd := fmt.Sprintf("oc patch %s/%s -p '{%q:[%q]}'", "oauthclient", defaultRedirectClient, "redirectURIs", developmentRedirectURI)
fmt.Fprintf(out, fmt.Sprintf("Unable to add development redirect URI to the %q OAuthClient.\nTo manually add it, run %q\n", defaultRedirectClient, suggestedCmd))
return nil
}
return nil
}
// checkAvailablePorts ensures that ports used by OpenShift are available on the Docker host
func (c *ClusterUpConfig) checkAvailablePorts() error {
err := c.OpenShiftHelper().TestPorts(openshift.AllPorts)
if err == nil {
return nil
}
if !openshift.IsPortsNotAvailableErr(err) {
return err
}
unavailable := sets.NewInt(openshift.UnavailablePorts(err)...)
if unavailable.HasAny(openshift.BasePorts...) {
return errors.NewError("a port needed by OpenShift is not available").WithCause(err)
}
if unavailable.Has(openshift.DefaultDNSPort) {
return errors.NewError(fmt.Sprintf("DNS port %d is not available", openshift.DefaultDNSPort))
}
for _, port := range openshift.RouterPorts {
if unavailable.Has(port) {
glog.Warningf("Port %d is already in use and may cause routing issues for applications.\n", port)
}
}
return nil
}
// determineServerIP gets an appropriate IP address to communicate with the OpenShift server
func (c *ClusterUpConfig) determineServerIP() (string, []string, error) {
ip, err := c.determineIP()
if err != nil {
return "", nil, errors.NewError("cannot determine a server IP to use").WithCause(err)
}
serverIP := ip
additionalIPs, err := c.determineAdditionalIPs(c.ServerIP)
if err != nil {
return "", nil, errors.NewError("cannot determine additional IPs").WithCause(err)
}
return serverIP, additionalIPs, nil
}
// updateNoProxy will add some default values to the NO_PROXY setting if they are not present
func (c *ClusterUpConfig) updateNoProxy() {
values := []string{"127.0.0.1", c.ServerIP, "localhost", service_catalog.ServiceCatalogServiceIP, registry.RegistryServiceClusterIP}
ipFromServer, err := c.OpenShiftHelper().ServerIP()
if err == nil {
values = append(values, ipFromServer)
}
noProxySet := sets.NewString(c.NoProxy...)
for _, v := range values {
if !noProxySet.Has(v) {
noProxySet.Insert(v)
c.NoProxy = append(c.NoProxy, v)
}
}
}
func (c *ClusterUpConfig) PostClusterStartupMutations(out io.Writer) error {
restConfig, err := c.RESTConfig()
if err != nil {
return err
}
kClient, err := kclientset.NewForConfig(restConfig)
if err != nil {
return err
}
// Remove any duplicate nodes
if err := c.OpenShiftHelper().CheckNodes(kClient); err != nil {
return err
}
return nil
}
func (c *ClusterUpConfig) imageFormat() string {
return c.ImageTemplate.Format
}
// Login logs into the new server and sets up a default user and project
func (c *ClusterUpConfig) login(streams genericclioptions.IOStreams) error {
server := c.OpenShiftHelper().Master(c.ServerIP)
return openshift.Login(initialUser, initialPassword, server, c.GetKubeAPIServerConfigDir(), c.defaultClientConfig, c.command, streams)
}
// createProject creates a new project for the current user
func (c *ClusterUpConfig) createProject(out io.Writer) error {
f, err := openshift.LoggedInUserFactory()
if err != nil {
return errors.NewError("cannot get logged in user client").WithCause(err)
}
return openshift.CreateProject(f, initialProjectName, initialProjectDisplay, initialProjectDesc, "oc", out)
}
// serverInfo displays server information after a successful start
func (c *ClusterUpConfig) serverInfo(out io.Writer) {
masterURL := fmt.Sprintf("https://%s:8443", c.GetPublicHostName())
msg := fmt.Sprintf("OpenShift server started.\n\n"+
"The server is accessible via web console at:\n"+
" %s\n\n", masterURL)
if c.createdUser {
msg += fmt.Sprintf("You are logged in as:\n"+
" User: %s\n"+
" Password: <any value>\n\n", initialUser)
msg += "To login as administrator:\n" +
" oc login -u system:admin\n\n"
}
msg += c.checkProxySettings()
fmt.Fprintf(out, msg)
}
// checkProxySettings compares proxy settings specified for cluster up
// and those on the Docker daemon and generates appropriate warnings.
func (c *ClusterUpConfig) checkProxySettings() string {
warnings := []string{}
dockerHTTPProxy, dockerHTTPSProxy, dockerNoProxy, err := c.DockerHelper().GetDockerProxySettings()
if err != nil {
return "Unexpected error: " + err.Error()
}
// Check HTTP proxy
if len(c.HTTPProxy) > 0 && len(dockerHTTPProxy) == 0 {
warnings = append(warnings, "You specified an HTTP proxy for cluster up, but one is not configured for the Docker daemon")
} else if len(c.HTTPProxy) == 0 && len(dockerHTTPProxy) > 0 {
warnings = append(warnings, fmt.Sprintf("An HTTP proxy (%s) is configured for the Docker daemon, but you did not specify one for cluster up", dockerHTTPProxy))
} else if c.HTTPProxy != dockerHTTPProxy {
warnings = append(warnings, fmt.Sprintf("The HTTP proxy configured for the Docker daemon (%s) is not the same one you specified for cluster up", dockerHTTPProxy))
}
// Check HTTPS proxy
if len(c.HTTPSProxy) > 0 && len(dockerHTTPSProxy) == 0 {
warnings = append(warnings, "You specified an HTTPS proxy for cluster up, but one is not configured for the Docker daemon")
} else if len(c.HTTPSProxy) == 0 && len(dockerHTTPSProxy) > 0 {
warnings = append(warnings, fmt.Sprintf("An HTTPS proxy (%s) is configured for the Docker daemon, but you did not specify one for cluster up", dockerHTTPSProxy))
} else if c.HTTPSProxy != dockerHTTPSProxy {
warnings = append(warnings, fmt.Sprintf("The HTTPS proxy configured for the Docker daemon (%s) is not the same one you specified for cluster up", dockerHTTPSProxy))
}
if len(dockerHTTPProxy) > 0 || len(dockerHTTPSProxy) > 0 {
dockerNoProxyList := strings.Split(dockerNoProxy, ",")
dockerNoProxySet := sets.NewString(dockerNoProxyList...)
if !dockerNoProxySet.Has(registry.RegistryServiceClusterIP) {
warnings = append(warnings, fmt.Sprintf("A proxy is configured for Docker, however %[1]s is not included in its NO_PROXY list.\n"+
" %[1]s needs to be included in the Docker daemon's NO_PROXY environment variable so pushes to the local OpenShift registry can succeed.", registry.RegistryServiceClusterIP))
}
}
if len(warnings) > 0 {
buf := &bytes.Buffer{}
for _, w := range warnings {
fmt.Fprintf(buf, "WARNING: %s\n", w)
}
return buf.String()
}
return ""
}
// OpenShiftHelper returns a helper object to work with OpenShift on the server
func (c *ClusterUpConfig) OpenShiftHelper() *openshift.Helper {
if c.openshiftHelper == nil {
c.openshiftHelper = openshift.NewHelper(c.DockerHelper(), c.openshiftImage(), openshift.ContainerName)
}
return c.openshiftHelper
}
// HostHelper returns a helper object to check Host configuration
func (c *ClusterUpConfig) HostHelper() *host.HostHelper {
if c.hostHelper == nil {
c.hostHelper = host.NewHostHelper(c.DockerHelper(), c.openshiftImage())
}
return c.hostHelper
}
// DockerHelper returns a helper object to work with the Docker client
func (c *ClusterUpConfig) DockerHelper() *dockerhelper.Helper {
if c.dockerHelper == nil {
c.dockerHelper = dockerhelper.NewHelper(c.dockerClient)
}
return c.dockerHelper
}
func (c *ClusterUpConfig) openshiftImage() string {
return c.ImageTemplate.ExpandOrDie("control-plane")
}
func (c *ClusterUpConfig) hypershiftImage() string {
return c.ImageTemplate.ExpandOrDie("hypershift")
}
func (c *ClusterUpConfig) hyperkubeImage() string {
return c.ImageTemplate.ExpandOrDie("hyperkube")
}
func (c *ClusterUpConfig) cliImage() string {
return c.ImageTemplate.ExpandOrDie("cli")
}
func (c *ClusterUpConfig) nodeImage() string {
return c.ImageTemplate.ExpandOrDie("node")
}
func (c *ClusterUpConfig) determineAdditionalIPs(ip string) ([]string, error) {
additionalIPs := sets.NewString()
serverIPs, err := c.OpenShiftHelper().OtherIPs(ip)
if err != nil {
return nil, errors.NewError("could not determine additional IPs").WithCause(err)
}
additionalIPs.Insert(serverIPs...)
if c.PortForwarding {
localIPs, err := c.localIPs()
if err != nil {
return nil, errors.NewError("could not determine additional local IPs").WithCause(err)
}
additionalIPs.Insert(localIPs...)
}
return additionalIPs.List(), nil
}
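// localIPs returns the IPv4 addresses of every non-loopback network interface that is currently up.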
func (c *ClusterUpConfig) localIPs() ([]string, error) {
ips := []string{}
devices, err := net.Interfaces()
if err != nil {
return nil, err
}
for _, dev := range devices {
if (dev.Flags&net.FlagUp != 0) && (dev.Flags&net.FlagLoopback == 0) {
addrs, err := dev.Addrs()
if err != nil {
continue
}
for i := range addrs {
if ip, ok := addrs[i].(*net.IPNet); ok {
if ip.IP.To4() != nil {
ips = append(ips, ip.IP.String())
}
}
}
}
}
return ips, nil
}
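// determineIP picks the IP clients will use to reach the server, trying in order: an explicit
// public hostname IP, the loopback address when port forwarding is enabled, the Docker host IP,
// the loopback address, the IP reported by openshift, and finally other IPs on the Docker host.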
func (c *ClusterUpConfig) determineIP() (string, error) {
if ip := net.ParseIP(c.PublicHostname); ip != nil && !ip.IsUnspecified() {
fmt.Fprintf(c.Out, "Using public hostname IP %s as the host IP\n", ip)
return ip.String(), nil
}
// If using port-forwarding, use the default loopback address
if c.PortForwarding {
return "127.0.0.1", nil
}
// Try to get the host from the DOCKER_HOST if communicating via tcp
var err error
ip := c.DockerHelper().HostIP()
if ip != "" {
glog.V(2).Infof("Testing Docker host IP (%s)", ip)
if err = c.OpenShiftHelper().TestIP(ip); err == nil {
return ip, nil
}
}
glog.V(2).Infof("Cannot use the Docker host IP(%s): %v", ip, err)
// If IP is not specified, try to use the loopback IP
// This is to default to an ip-agnostic client setup
// where the real IP of the host will not affect client operations
if err = c.OpenShiftHelper().TestIP("127.0.0.1"); err == nil {
return "127.0.0.1", nil
}
// Next, use the --print-ip output from openshift
ip, err = c.OpenShiftHelper().ServerIP()
if err == nil {
glog.V(2).Infof("Testing openshift --print-ip (%s)", ip)
if err = c.OpenShiftHelper().TestIP(ip); err == nil {
return ip, nil
}
glog.V(2).Infof("OpenShift server ip test failed: %v", err)
}
glog.V(2).Infof("Cannot use OpenShift IP: %v", err)
// Next, try other IPs on Docker host
ips, err := c.OpenShiftHelper().OtherIPs(ip)
if err != nil {
return "", err
}
for i := range ips {
glog.V(2).Infof("Testing additional IP (%s)", ip)
if err = c.OpenShiftHelper().TestIP(ips[i]); err == nil {
return ip, nil
}
glog.V(2).Infof("OpenShift additional ip test failed: %v", err)
}
return "", errors.NewError("cannot determine an IP to use for your server.")
}
// ShouldCreateUser determines whether a user and project should
// be created. If the user provider has been modified in the config, then it should
// not attempt to create a user. Also, even if the user provider has not been
// modified, but data has been initialized, then we should also not create a user.
func (c *ClusterUpConfig) ShouldCreateUser() bool {
restClientConfig, err := c.RESTConfig()
if err != nil {
glog.Warningf("error checking user: %v", err)
return true
}
userClient, err := userv1client.NewForConfig(restClientConfig)
if err != nil {
glog.Warningf("error checking user: %v", err)
return true
}
_, err = userClient.UserV1().Users().Get(initialUser, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return true
}
if err != nil {
glog.Warningf("error checking user: %v", err)
return true
}
return false
}
func (c *ClusterUpConfig) GetKubeAPIServerConfigDir() string {
return path.Join(c.BaseDir, kubeapiserver.KubeAPIServerDirName)
}
func (c *ClusterUpConfig) RESTConfig() (*rest.Config, error) {
clusterAdminKubeConfigBytes, err := c.ClusterAdminKubeConfigBytes()
if err != nil {
return nil, err
}
clusterAdminKubeConfig, err := kclientcmd.RESTConfigFromKubeConfig(clusterAdminKubeConfigBytes)
if err != nil {
return nil, err
}
return clusterAdminKubeConfig, nil
}
func (c *ClusterUpConfig) ClusterAdminKubeConfigBytes() ([]byte, error) {
return ioutil.ReadFile(path.Join(c.GetKubeAPIServerConfigDir(), "admin.kubeconfig"))
}
func (c *ClusterUpConfig) GetPublicHostName() string {
if len(c.PublicHostname) > 0 {
return c.PublicHostname
}
return c.ServerIP
}
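// isComponentEnabled reports whether a component should be enabled given the user-supplied list:
// an exact name enables it, a "-name" entry disables it, and "*" enables everything except the
// components that are disabled by default.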
func isComponentEnabled(name string, disabledByDefaultComponents sets.String, components ...string) bool {
hasStar := false
for _, ctrl := range components {
if ctrl == name {
return true
}
if ctrl == "-"+name {
return false
}
if ctrl == "*" {
hasStar = true
}
}
// if we get here, there was no explicit choice
if !hasStar {
// nothing on by default
return false
}
if disabledByDefaultComponents.Has(name) {
return false
}
return true
}
|
[
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\"",
"\"KUBECONFIG\""
] |
[] |
[
"DOCKER_HOST",
"KUBECONFIG"
] |
[]
|
["DOCKER_HOST", "KUBECONFIG"]
|
go
| 2 | 0 | |
util/logging.go
|
package util
import (
"fmt"
"log"
"os"
"path/filepath"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// NewOrionLogger returns an instance of a Zap logger configured with logFlag level, name of orionRuntime, and outputPath
func NewOrionLogger(logFlag string, orionRuntime string, outputPath string) (*zap.Logger, *os.File, error) {
cfg := zap.Config{
Encoding: "json",
Level: zap.NewAtomicLevelAt(zapcore.DebugLevel),
OutputPaths: []string{outputPath},
ErrorOutputPaths: []string{"stderr"},
EncoderConfig: zapcore.EncoderConfig{
MessageKey: "message",
LevelKey: "level",
EncodeLevel: zapcore.CapitalColorLevelEncoder,
TimeKey: "time",
EncodeTime: zapcore.ISO8601TimeEncoder,
CallerKey: "caller",
EncodeCaller: zapcore.ShortCallerEncoder,
},
}
filename := orionRuntime + ".json"
filePath, _ := filepath.Abs(outputPath + "/" + filename)
// Ensure directory path exists and if not create it
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
os.MkdirAll(outputPath, 0700)
}
f, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
if os.IsNotExist(err) {
fmt.Println("[Logging] File does not exist")
}
// return nil, nil, err
log.Fatal(err)
}
fileEncoder := zapcore.NewJSONEncoder(cfg.EncoderConfig)
consoleEncoder := zapcore.NewConsoleEncoder(cfg.EncoderConfig)
level := zap.InfoLevel
if logFlag == "debug" {
fmt.Println("[Logging] Orion output filepath: " + filePath)
level = zap.DebugLevel
}
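// Tee the output so every entry is written both to the JSON log file and to stdout via the console encoder.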
core := zapcore.NewTee(
zapcore.NewCore(fileEncoder, zapcore.AddSync(f), level),
zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), level),
)
logger := zap.New(core)
zap.ReplaceGlobals(logger)
defer logger.Sync() // flushes buffer, if any
zap.L().Debug("Global logger established")
logger.Debug("Starting logger.")
return logger, f, nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
Django-apiTest/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiTest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
.gitpod/loan/x/loan/client/cli/query_loan_test.go
|
package cli_test
import (
"fmt"
"testing"
"github.com/cosmos/cosmos-sdk/client/flags"
clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli"
"github.com/stretchr/testify/require"
tmcli "github.com/tendermint/tendermint/libs/cli"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/cosmonaut/loan/testutil/network"
"github.com/cosmonaut/loan/x/loan/client/cli"
"github.com/cosmonaut/loan/x/loan/types"
)
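// networkWithLoanObjects starts an in-process test network whose genesis state is seeded with
// n Loan objects and returns the network together with those objects.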
func networkWithLoanObjects(t *testing.T, n int) (*network.Network, []types.Loan) {
t.Helper()
cfg := network.DefaultConfig()
state := types.GenesisState{}
require.NoError(t, cfg.Codec.UnmarshalJSON(cfg.GenesisState[types.ModuleName], &state))
for i := 0; i < n; i++ {
state.LoanList = append(state.LoanList, types.Loan{
Id: uint64(i),
})
}
buf, err := cfg.Codec.MarshalJSON(&state)
require.NoError(t, err)
cfg.GenesisState[types.ModuleName] = buf
return network.New(t, cfg), state.LoanList
}
func TestShowLoan(t *testing.T) {
net, objs := networkWithLoanObjects(t, 2)
ctx := net.Validators[0].ClientCtx
common := []string{
fmt.Sprintf("--%s=json", tmcli.OutputFlag),
}
for _, tc := range []struct {
desc string
id string
args []string
err error
obj types.Loan
}{
{
desc: "found",
id: fmt.Sprintf("%d", objs[0].Id),
args: common,
obj: objs[0],
},
{
desc: "not found",
id: "not_found",
args: common,
err: status.Error(codes.InvalidArgument, "not found"),
},
} {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
args := []string{tc.id}
args = append(args, tc.args...)
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdShowLoan(), args)
if tc.err != nil {
stat, ok := status.FromError(tc.err)
require.True(t, ok)
require.ErrorIs(t, stat.Err(), tc.err)
} else {
require.NoError(t, err)
var resp types.QueryGetLoanResponse
require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
require.NotNil(t, resp.Loan)
require.Equal(t, tc.obj, resp.Loan)
}
})
}
}
func TestListLoan(t *testing.T) {
net, objs := networkWithLoanObjects(t, 5)
ctx := net.Validators[0].ClientCtx
request := func(next []byte, offset, limit uint64, total bool) []string {
args := []string{
fmt.Sprintf("--%s=json", tmcli.OutputFlag),
}
if next == nil {
args = append(args, fmt.Sprintf("--%s=%d", flags.FlagOffset, offset))
} else {
args = append(args, fmt.Sprintf("--%s=%s", flags.FlagPageKey, next))
}
args = append(args, fmt.Sprintf("--%s=%d", flags.FlagLimit, limit))
if total {
args = append(args, fmt.Sprintf("--%s", flags.FlagCountTotal))
}
return args
}
t.Run("ByOffset", func(t *testing.T) {
step := 2
for i := 0; i < len(objs); i += step {
args := request(nil, uint64(i), uint64(step), false)
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListLoan(), args)
require.NoError(t, err)
var resp types.QueryAllLoanResponse
require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
require.LessOrEqual(t, len(resp.Loan), step)
require.Subset(t, objs, resp.Loan)
}
})
t.Run("ByKey", func(t *testing.T) {
step := 2
var next []byte
for i := 0; i < len(objs); i += step {
args := request(next, 0, uint64(step), false)
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListLoan(), args)
require.NoError(t, err)
var resp types.QueryAllLoanResponse
require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
require.LessOrEqual(t, len(resp.Loan), step)
require.Subset(t, objs, resp.Loan)
next = resp.Pagination.NextKey
}
})
t.Run("Total", func(t *testing.T) {
args := request(nil, 0, uint64(len(objs)), true)
out, err := clitestutil.ExecTestCLICmd(ctx, cli.CmdListLoan(), args)
require.NoError(t, err)
var resp types.QueryAllLoanResponse
require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &resp))
require.NoError(t, err)
require.Equal(t, len(objs), int(resp.Pagination.Total))
require.Equal(t, objs, resp.Loan)
})
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
app/app/settings.py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure--(pr8m@apzp9$p7+2==4n_e_c2g2djay^n48zs_(i!i6lo85v^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
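# Connection settings are read from environment variables so they can be supplied by the runtime
# environment (for example docker-compose) rather than hard-coded in this file.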
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
|
[] |
[] |
[
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
cogs/botstat.py
|
import asyncio
import os
import random
import statcord
import aiosqlite
from dotenv import load_dotenv
import discord
from discord import errors
from discord.ext import commands
import koreanbots
from koreanbots.integrations import discord
load_dotenv(verbose=True)
class botstat(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.krb = koreanbots.Koreanbots(api_key=os.getenv("KRB_TOKEN"))
self._krb = discord.DiscordpyKoreanbots(client=self.bot,api_key=os.getenv("KRB_TOKEN"),run_task=True)
self.statcord = statcord.Client(self.bot, os.getenv("STATCORD"),custom1=self.custom1,custom2=self.custom2,logging_level='INFO')
self.statcord.start_loop()
@commands.command(name="하트인증", aliases=["추천인증","추천","하트","ㅊㅊ"])
async def heart_check(self,ctx):
voted = await self.krb.is_voted(user_id=ctx.author.id,bot_id=self.bot.user.id)
db = await aiosqlite.connect("db/db.sqlite")
cur = await db.execute("SELECT * FROM badge WHERE user = ? AND badge_type = ?", (ctx.author.id, "heartverify"))
res = await cur.fetchone()
if voted.voted:
if res is not None:
badge_msg = "이미 <:heartverify_1:905318776407478283><:heartverify_2:905318776864649236><:heartverify_3:905318776424255501>배지를 소유하고 있어 무시되었어요."
else:
await db.execute("INSERT INTO badge(user,badge_type) VALUES (?,?)", (ctx.author.id, "heartverify"))
await db.commit()
badge_msg = "하트 인증이 확인되어 <:heartverify_1:905318776407478283><:heartverify_2:905318776864649236><:heartverify_3:905318776424255501>배지를 부여해드렸어요!"
return await ctx.reply("> 추천해주셔서 감사해요!💕\n> " + badge_msg)
msg = await ctx.reply("> 추천하지 않으신 것 같아요.. 아래링크로 이동하셔서 추천해주세요!\n> 링크: https://koreanbots.dev/bots/893841721958469703/vote\n> 1분후 재확인 할게요!")
await asyncio.sleep(60)
cur = await db.execute("SELECT * FROM badge WHERE user = ? AND badge_type = ?", (ctx.author.id, "heartverify"))
res = await cur.fetchone()
voted = await self.krb.is_voted(user_id=ctx.author.id, bot_id=self.bot.user.id)
if voted.voted:
if res is not None:
badge_msg = "이미 <:heartverify_1:905318776407478283><:heartverify_2:905318776864649236><:heartverify_3:905318776424255501>배지를 소유하고 있어 무시되었어요."
else:
await db.execute("INSERT INTO badge(user,badge_type) VALUES (?,?)", (ctx.author.id, "heartverify"))
await db.commit()
badge_msg = "하트 인증이 확인되어 <:heartverify_1:905318776407478283><:heartverify_2:905318776864649236><:heartverify_3:905318776424255501>배지를 부여해드렸어요!"
return await msg.edit("> 추천이 확인되었어요! 추천해주셔서 감사해요!💕\n> " + badge_msg)
await msg.edit("> 추천이 확인되지않았어요..😢 혹시 마음에 드시지않으신가요..?🥺")
@commands.Cog.listener()
async def on_command(self, ctx):
self.statcord.command_run(ctx)
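# custom1 and custom2 are passed to the Statcord client above as custom metrics:
# the bot's current koreanbots vote count and the number of active voice connections.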
async def custom1(self):
resp = (await self._krb.botinfo(self.bot.user.id)).votes
return str(resp)
async def custom2(self):
return str(len(self.bot.voice_clients))
def setup(bot):
bot.add_cog(botstat(bot))
|
[] |
[] |
[
"STATCORD",
"KRB_TOKEN"
] |
[]
|
["STATCORD", "KRB_TOKEN"]
|
python
| 2 | 0 | |
48 Lisas Workbook.java
|
/*Lisas workbook
Written By - Aditya Wagholikar*/
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the workbook function below.
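// Each page holds at most k problems, so a chapter with p problems spans ceil(p/k) pages.
// Walk through every page, tracking the range of problem numbers printed on it; a page is
// "special" when its page number falls inside that range.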
static int workbook(int n, int k, int[] arr) {
boolean flag = true;
int current_page_no = 1;
int no_of_chapters = arr.length;
int index = 0;
int beginning = 1;
int end = k;
int no_special_page = 0;
int no_of_problems_in_chapter=0;
while(flag){
no_of_problems_in_chapter = arr[index];
if (current_page_no >=beginning && current_page_no <= end){
//System.out.println("Current Page : "+current_page_no+" , Chapter # "+(index+1)+" ,Problem Number Range : "+beginning+" to "+end);
no_special_page++;
}
//System.out.println("*** General : Current Page : "+current_page_no+" , Chapter # "+(index+1)+" ,Problem Number Range : "+beginning+" to "+end);
current_page_no++;
if ((beginning + k) > no_of_problems_in_chapter){
index++;
if(index >= no_of_chapters){
//System.out.println("We are at break point, index : "+index+" , No of chapters : "+no_of_chapters);
break;
}
beginning = 1;
end = arr[index] <k? arr[index]:k;
}else{
beginning+=k;
end = end + k > no_of_problems_in_chapter?no_of_problems_in_chapter:end+k;
}
//flag = index==no_of_chapters? false:true;
}
return(no_special_page);
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nk = scanner.nextLine().split(" ");
int n = Integer.parseInt(nk[0]);
int k = Integer.parseInt(nk[1]);
int[] arr = new int[n];
String[] arrItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < n; i++) {
int arrItem = Integer.parseInt(arrItems[i]);
arr[i] = arrItem;
}
int result = workbook(n, k, arr);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
src/main/python/pybuilder/plugins/python/python_plugin_helper.py
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
from pybuilder.utils import (discover_modules,
discover_files_matching,
execute_command,
as_list,
read_file)
def log_report(logger, name, report_lines):
count_of_warnings = len(report_lines)
if count_of_warnings > 0:
for report_line in report_lines:
logger.warn(name + ': ' + report_line[:-1])
def discover_python_files(directory):
return discover_files_matching(directory, "*.py")
def discover_affected_files(include_test_sources, include_scripts, project):
source_dir = project.get_property("dir_source_main_python")
files = discover_python_files(source_dir)
if include_test_sources:
if project.get_property("dir_source_unittest_python"):
unittest_dir = project.get_property("dir_source_unittest_python")
files = itertools.chain(files, discover_python_files(unittest_dir))
if project.get_property("dir_source_integrationtest_python"):
integrationtest_dir = project.get_property("dir_source_integrationtest_python")
files = itertools.chain(files, discover_python_files(integrationtest_dir))
if include_scripts and project.get_property("dir_source_main_scripts"):
scripts_dir = project.get_property("dir_source_main_scripts")
files = itertools.chain(files,
discover_files_matching(scripts_dir, "*")) # we have no idea how scripts might look
return files
def discover_affected_dirs(include_test_sources, include_scripts, project):
files = [project.get_property("dir_source_main_python")]
if include_test_sources:
if _if_property_set_and_dir_exists(project.get_property("dir_source_unittest_python")):
files.append(project.get_property("dir_source_unittest_python"))
if _if_property_set_and_dir_exists(project.get_property("dir_source_integrationtest_python")):
files.append(project.get_property("dir_source_integrationtest_python"))
if include_scripts and _if_property_set_and_dir_exists(project.get_property("dir_source_main_scripts")):
files.append(project.get_property("dir_source_main_scripts"))
return files
def _if_property_set_and_dir_exists(property_value):
return property_value and os.path.isdir(property_value)
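# Runs an external tool over the project's sources (optionally including test sources and scripts)
# and captures its output under $dir_reports/<name>; with <name>_verbose_output set, the report
# lines are also logged as warnings.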
def execute_tool_on_source_files(project, name, command_and_arguments, logger=None,
include_test_sources=False, include_scripts=False, include_dirs_only=False):
if include_dirs_only:
files = discover_affected_dirs(include_test_sources, include_scripts, project)
else:
files = discover_affected_files(include_test_sources, include_scripts, project)
command = as_list(command_and_arguments) + [f for f in files]
report_file = project.expand_path("$dir_reports/{0}".format(name))
execution_result = execute_command(command, report_file), report_file
report_file = execution_result[1]
report_lines = read_file(report_file)
if project.get_property(name + "_verbose_output") and logger:
log_report(logger, name, report_lines)
return execution_result
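# Same idea as above, but the tool is invoked with discovered module names instead of file paths,
# optionally putting the main source directory on PYTHONPATH.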
def execute_tool_on_modules(project, name, command_and_arguments, extend_pythonpath=True):
source_dir = project.expand_path("$dir_source_main_python")
modules = discover_modules(source_dir)
command = as_list(command_and_arguments) + modules
report_file = project.expand_path("$dir_reports/%s" % name)
env = os.environ
if extend_pythonpath:
env["PYTHONPATH"] = source_dir
return execute_command(command, report_file, env=env), report_file
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
v3/net/net_test.go
|
package net
import (
"fmt"
"math"
"os"
"runtime"
"testing"
"github.com/shirou/gopsutil/v3/internal/common"
)
func skipIfNotImplementedErr(t *testing.T, err error) {
if err == common.ErrNotImplementedError {
t.Skip("not implemented")
}
}
func TestAddrString(t *testing.T) {
v := Addr{IP: "192.168.0.1", Port: 8000}
s := fmt.Sprintf("%v", v)
if s != `{"ip":"192.168.0.1","port":8000}` {
t.Errorf("Addr string is invalid: %v", v)
}
}
func TestNetIOCountersStatString(t *testing.T) {
v := IOCountersStat{
Name: "test",
BytesSent: 100,
}
e := `{"name":"test","bytesSent":100,"bytesRecv":0,"packetsSent":0,"packetsRecv":0,"errin":0,"errout":0,"dropin":0,"dropout":0,"fifoin":0,"fifoout":0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("NetIOCountersStat string is invalid: %v", v)
}
}
func TestNetProtoCountersStatString(t *testing.T) {
v := ProtoCountersStat{
Protocol: "tcp",
Stats: map[string]int64{
"MaxConn": -1,
"ActiveOpens": 4000,
"PassiveOpens": 3000,
},
}
e := `{"protocol":"tcp","stats":{"ActiveOpens":4000,"MaxConn":-1,"PassiveOpens":3000}}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("NetProtoCountersStat string is invalid: %v", v)
}
}
func TestNetConnectionStatString(t *testing.T) {
v := ConnectionStat{
Fd: 10,
Family: 10,
Type: 10,
Uids: []int32{10, 10},
}
e := `{"fd":10,"family":10,"type":10,"localaddr":{"ip":"","port":0},"remoteaddr":{"ip":"","port":0},"status":"","uids":[10,10],"pid":0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("NetConnectionStat string is invalid: %v", v)
}
}
func TestNetIOCountersAll(t *testing.T) {
v, err := IOCounters(false)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("Could not get NetIOCounters: %v", err)
}
per, err := IOCounters(true)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("Could not get NetIOCounters: %v", err)
}
if len(v) != 1 {
t.Errorf("Could not get NetIOCounters: %v", v)
}
if v[0].Name != "all" {
t.Errorf("Invalid NetIOCounters: %v", v)
}
var pr uint64
for _, p := range per {
pr += p.PacketsRecv
}
// small diff is ok
if math.Abs(float64(v[0].PacketsRecv)-float64(pr)) > 5 {
if ci := os.Getenv("CI"); ci != "" {
// This test often fails in CI. so just print even if failed.
fmt.Printf("invalid sum value: %v, %v", v[0].PacketsRecv, pr)
} else {
t.Errorf("invalid sum value: %v, %v", v[0].PacketsRecv, pr)
}
}
}
func TestNetIOCountersPerNic(t *testing.T) {
v, err := IOCounters(true)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("Could not get NetIOCounters: %v", err)
}
if len(v) == 0 {
t.Errorf("Could not get NetIOCounters: %v", v)
}
for _, vv := range v {
if vv.Name == "" {
t.Errorf("Invalid NetIOCounters: %v", vv)
}
}
}
func TestGetNetIOCountersAll(t *testing.T) {
n := []IOCountersStat{
{
Name: "a",
BytesRecv: 10,
PacketsRecv: 10,
},
{
Name: "b",
BytesRecv: 10,
PacketsRecv: 10,
Errin: 10,
},
}
ret, err := getIOCountersAll(n)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Error(err)
}
if len(ret) != 1 {
t.Errorf("invalid return count")
}
if ret[0].Name != "all" {
t.Errorf("invalid return name")
}
if ret[0].BytesRecv != 20 {
t.Errorf("invalid count bytesrecv")
}
if ret[0].Errin != 10 {
t.Errorf("invalid count errin")
}
}
func TestNetInterfaces(t *testing.T) {
v, err := Interfaces()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("Could not get NetInterfaceStat: %v", err)
}
if len(v) == 0 {
t.Errorf("Could not get NetInterfaceStat: %v", err)
}
for _, vv := range v {
if vv.Name == "" {
t.Errorf("Invalid NetInterface: %v", vv)
}
}
}
func TestNetProtoCountersStatsAll(t *testing.T) {
v, err := ProtoCounters(nil)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
if len(v) == 0 {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
for _, vv := range v {
if vv.Protocol == "" {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
if len(vv.Stats) == 0 {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
}
}
func TestNetProtoCountersStats(t *testing.T) {
v, err := ProtoCounters([]string{"tcp", "ip"})
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
if len(v) == 0 {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
if len(v) != 2 {
t.Fatalf("Go incorrect number of NetProtoCounters: %v", err)
}
for _, vv := range v {
if vv.Protocol != "tcp" && vv.Protocol != "ip" {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
if len(vv.Stats) == 0 {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
}
}
func TestNetConnections(t *testing.T) {
if ci := os.Getenv("CI"); ci != "" { // skip if test on drone.io
return
}
v, err := Connections("inet")
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("could not get NetConnections: %v", err)
}
if len(v) == 0 {
t.Errorf("could not get NetConnections: %v", v)
}
for _, vv := range v {
if vv.Family == 0 {
t.Errorf("invalid NetConnections: %v", vv)
}
}
}
func TestNetFilterCounters(t *testing.T) {
if ci := os.Getenv("CI"); ci != "" { // skip if test on drone.io
return
}
if runtime.GOOS == "linux" {
// some test environment has not the path.
if !common.PathExists("/proc/sys/net/netfilter/nf_connTrackCount") {
t.SkipNow()
}
}
v, err := FilterCounters()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("could not get NetConnections: %v", err)
}
if len(v) == 0 {
t.Errorf("could not get NetConnections: %v", v)
}
for _, vv := range v {
if vv.ConnTrackMax == 0 {
t.Errorf("nf_connTrackMax needs to be greater than zero: %v", vv)
}
}
}
func TestInterfaceStatString(t *testing.T) {
v := InterfaceStat{
Index: 0,
MTU: 1500,
Name: "eth0",
HardwareAddr: "01:23:45:67:89:ab",
Flags: []string{"up", "down"},
Addrs: InterfaceAddrList{{Addr: "1.2.3.4"}, {Addr: "5.6.7.8"}},
}
s := fmt.Sprintf("%v", v)
if s != `{"index":0,"mtu":1500,"name":"eth0","hardwareAddr":"01:23:45:67:89:ab","flags":["up","down"],"addrs":[{"addr":"1.2.3.4"},{"addr":"5.6.7.8"}]}` {
t.Errorf("InterfaceStat string is invalid: %v", s)
}
list := InterfaceStatList{v, v}
s = fmt.Sprintf("%v", list)
if s != `[{"index":0,"mtu":1500,"name":"eth0","hardwareAddr":"01:23:45:67:89:ab","flags":["up","down"],"addrs":[{"addr":"1.2.3.4"},{"addr":"5.6.7.8"}]},{"index":0,"mtu":1500,"name":"eth0","hardwareAddr":"01:23:45:67:89:ab","flags":["up","down"],"addrs":[{"addr":"1.2.3.4"},{"addr":"5.6.7.8"}]}]` {
t.Errorf("InterfaceStatList string is invalid: %v", s)
}
}
|
[
"\"CI\"",
"\"CI\"",
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
main.go
|
// Package main defines a command line interface for the sqlboiler package
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/volatiletech/sqlboiler/boilingcore"
"github.com/volatiletech/sqlboiler/drivers"
"github.com/volatiletech/sqlboiler/importers"
)
//go:generate go-bindata -nometadata -pkg templatebin -o templatebin/bindata.go templates templates/singleton templates_test templates_test/singleton templates_mgr
const sqlBoilerVersion = "3.2.1"
var (
flagConfigFile string
cmdState *boilingcore.State
cmdConfig *boilingcore.Config
)
func initConfig() {
if len(flagConfigFile) != 0 {
viper.SetConfigFile(flagConfigFile)
if err := viper.ReadInConfig(); err != nil {
fmt.Println("Can't read config:", err)
os.Exit(1)
}
return
}
var err error
viper.SetConfigName("sqlboiler")
configHome := os.Getenv("XDG_CONFIG_HOME")
homePath := os.Getenv("HOME")
wd, err := os.Getwd()
if err != nil {
wd = "."
}
configPaths := []string{wd}
if len(configHome) > 0 {
configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
} else {
configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
}
for _, p := range configPaths {
viper.AddConfigPath(p)
}
// Ignore errors here, fallback to other validation methods.
// Users can use environment variables if a config is not found.
_ = viper.ReadInConfig()
}
func main() {
// Too much happens between here and cobra's argument handling, for
// something so simple just do it immediately.
for _, arg := range os.Args {
if arg == "--version" {
fmt.Println("SQLBoiler v" + sqlBoilerVersion)
return
}
}
// Set up the cobra root command
var rootCmd = &cobra.Command{
Use: "sqlboiler [flags] <driver>",
Short: "SQL Boiler generates an ORM tailored to your database schema.",
Long: "SQL Boiler generates a Go ORM from template files, tailored to your database schema.\n" +
`Complete documentation is available at http://github.com/volatiletech/sqlboiler`,
Example: `sqlboiler psql`,
PreRunE: preRun,
RunE: run,
PostRunE: postRun,
SilenceErrors: true,
SilenceUsage: true,
}
cobra.OnInitialize(initConfig)
// Set up the cobra root command flags
rootCmd.PersistentFlags().StringVarP(&flagConfigFile, "config", "c", "", "Filename of config file to override default lookup")
rootCmd.PersistentFlags().StringP("output", "o", "models", "The name of the folder to output to")
rootCmd.PersistentFlags().StringP("pkgname", "p", "models", "The name you wish to assign to your generated package")
rootCmd.PersistentFlags().StringSliceP("templates", "", nil, "A templates directory, overrides the bindata'd template folders in sqlboiler")
rootCmd.PersistentFlags().StringSliceP("tag", "t", nil, "Struct tags to be included on your models in addition to json, yaml, toml")
rootCmd.PersistentFlags().StringSliceP("replace", "", nil, "Replace templates by directory: relpath/to_file.tpl:relpath/to_replacement.tpl")
rootCmd.PersistentFlags().BoolP("debug", "d", false, "Debug mode prints stack traces on error")
rootCmd.PersistentFlags().BoolP("no-context", "", false, "Disable context.Context usage in the generated code")
rootCmd.PersistentFlags().BoolP("no-tests", "", false, "Disable generated go test files")
rootCmd.PersistentFlags().BoolP("no-hooks", "", false, "Disable hooks feature for your models")
rootCmd.PersistentFlags().BoolP("no-rows-affected", "", false, "Disable rows affected in the generated API")
rootCmd.PersistentFlags().BoolP("no-auto-timestamps", "", false, "Disable automatic timestamps for created_at/updated_at")
rootCmd.PersistentFlags().BoolP("add-global-variants", "", false, "Enable generation for global variants")
rootCmd.PersistentFlags().BoolP("add-panic-variants", "", false, "Enable generation for panic variants")
rootCmd.PersistentFlags().BoolP("version", "", false, "Print the version")
rootCmd.PersistentFlags().BoolP("wipe", "", false, "Delete the output folder (rm -rf) before generation to ensure sanity")
rootCmd.PersistentFlags().StringP("struct-tag-casing", "", "snake", "Decides the casing for go structure tag names. camel or snake (default snake)")
rootCmd.PersistentFlags().BoolP("sharding", "", false, "Enables database sharding")
// hide flags not recommended for use
rootCmd.PersistentFlags().MarkHidden("replace")
viper.BindPFlags(rootCmd.PersistentFlags())
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()
if err := rootCmd.Execute(); err != nil {
if e, ok := err.(commandFailure); ok {
fmt.Printf("Error: %v\n\n", string(e))
rootCmd.Help()
} else if !viper.GetBool("debug") {
fmt.Printf("Error: %v\n", err)
} else {
fmt.Printf("Error: %+v\n", err)
}
os.Exit(1)
}
}
type commandFailure string
func (c commandFailure) Error() string {
return string(c)
}
func preRun(cmd *cobra.Command, args []string) error {
var err error
if len(args) == 0 {
return commandFailure("must provide a driver name")
}
driverName := args[0]
driverPath := args[0]
if strings.ContainsRune(driverName, os.PathSeparator) {
driverName = strings.Replace(filepath.Base(driverName), "sqlboiler-", "", 1)
driverName = strings.Replace(driverName, ".exe", "", 1)
} else {
driverPath = "sqlboiler-" + driverPath
if p, err := exec.LookPath(driverPath); err == nil {
driverPath = p
}
}
driverPath, err = filepath.Abs(driverPath)
if err != nil {
return errors.Wrap(err, "could not find absolute path to driver")
}
drivers.RegisterBinary(driverName, driverPath)
cmdConfig = &boilingcore.Config{
DriverName: driverName,
OutFolder: viper.GetString("output"),
PkgName: viper.GetString("pkgname"),
Debug: viper.GetBool("debug"),
AddGlobal: viper.GetBool("add-global-variants"),
AddPanic: viper.GetBool("add-panic-variants"),
NoContext: viper.GetBool("no-context"),
NoTests: viper.GetBool("no-tests"),
NoHooks: viper.GetBool("no-hooks"),
NoRowsAffected: viper.GetBool("no-rows-affected"),
NoAutoTimestamps: viper.GetBool("no-auto-timestamps"),
Wipe: viper.GetBool("wipe"),
StructTagCasing: strings.ToLower(viper.GetString("struct-tag-casing")), // camel | snake
TemplateDirs: viper.GetStringSlice("templates"),
Tags: viper.GetStringSlice("tag"),
Replacements: viper.GetStringSlice("replace"),
Aliases: boilingcore.ConvertAliases(viper.Get("aliases")),
TypeReplaces: boilingcore.ConvertTypeReplace(viper.Get("types")),
Sharding: viper.GetBool("sharding"),
}
if cmdConfig.Debug {
fmt.Fprintln(os.Stderr, "using driver:", driverPath)
}
// Configure the driver
cmdConfig.DriverConfig = map[string]interface{}{
"whitelist": viper.GetStringSlice(driverName + ".whitelist"),
"blacklist": viper.GetStringSlice(driverName + ".blacklist"),
}
keys := allKeys(driverName)
for _, key := range keys {
prefixedKey := fmt.Sprintf("%s.%s", driverName, key)
cmdConfig.DriverConfig[key] = viper.Get(prefixedKey)
}
cmdConfig.Imports = configureImports()
cmdState, err = boilingcore.New(cmdConfig)
return err
}
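// configureImports starts from the default import collection and overrides
// each section only when it is explicitly set in the loaded configuration.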
func configureImports() importers.Collection {
imports := importers.NewDefaultImports()
mustMap := func(m importers.Map, err error) importers.Map {
if err != nil {
panic("failed to change viper interface into importers.Map: " + err.Error())
}
return m
}
if viper.IsSet("imports.all.standard") {
imports.All.Standard = viper.GetStringSlice("imports.all.standard")
}
if viper.IsSet("imports.all.third_party") {
imports.All.ThirdParty = viper.GetStringSlice("imports.all.third_party")
}
if viper.IsSet("imports.test.standard") {
imports.Test.Standard = viper.GetStringSlice("imports.test.standard")
}
if viper.IsSet("imports.test.third_party") {
imports.Test.ThirdParty = viper.GetStringSlice("imports.test.third_party")
}
if viper.IsSet("imports.singleton") {
imports.Singleton = mustMap(importers.MapFromInterface(viper.Get("imports.singleton")))
}
if viper.IsSet("imports.test_singleton") {
imports.TestSingleton = mustMap(importers.MapFromInterface(viper.Get("imports.test_singleton")))
}
if viper.IsSet("imports.based_on_type") {
imports.BasedOnType = mustMap(importers.MapFromInterface(viper.Get("imports.based_on_type")))
}
return imports
}
func run(cmd *cobra.Command, args []string) error {
return cmdState.Run()
}
func postRun(cmd *cobra.Command, args []string) error {
return cmdState.Cleanup()
}
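// allKeys gathers the configuration keys scoped to the given driver prefix,
// merging keys found in environment variables (lowercased, with "_" mapped to
// ".") with the keys viper already knows about, so driver settings are picked
// up no matter where they were defined.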
func allKeys(prefix string) []string {
keys := make(map[string]bool)
prefix = prefix + "."
for _, e := range os.Environ() {
splits := strings.SplitN(e, "=", 2)
key := strings.Replace(strings.ToLower(splits[0]), "_", ".", -1)
if strings.HasPrefix(key, prefix) {
keys[strings.Replace(key, prefix, "", -1)] = true
}
}
for _, key := range viper.AllKeys() {
if strings.HasPrefix(key, prefix) {
keys[strings.Replace(key, prefix, "", -1)] = true
}
}
keySlice := make([]string, 0, len(keys))
for k := range keys {
keySlice = append(keySlice, k)
}
return keySlice
}
|
[
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
] |
[] |
[
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["HOME", "XDG_CONFIG_HOME"]
|
go
| 2 | 0 | |
tests/st/dump/test_async_a_plus_m_dump.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import sys
import tempfile
import time
import shutil
import glob
import json
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from dump_test_utils import generate_dump_json, generate_dump_json_with_overflow, check_dump_structure
from tests.security_utils import security_off_wrap
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.add = P.Add()
def construct(self, x_, y_):
return self.add(x_, y_)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
y = np.array([[7, 8, 9], [10, 11, 12]]).astype(np.float32)
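# Run the simple Add network in graph mode with asynchronous dump enabled via
# the MINDSPORE_DUMP_CONFIG environment variable, then verify the dump
# directory structure and, for the npy case, the dumped tensor contents.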
def run_async_dump(test_name):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
with tempfile.TemporaryDirectory(dir='/tmp') as tmp_dir:
dump_path = os.path.join(tmp_dir, 'async_dump')
dump_config_path = os.path.join(tmp_dir, 'async_dump.json')
generate_dump_json(dump_path, dump_config_path, test_name)
os.environ['MINDSPORE_DUMP_CONFIG'] = dump_config_path
dump_file_path = os.path.join(dump_path, 'rank_0', 'Net', '0', '0')
if os.path.isdir(dump_path):
shutil.rmtree(dump_path)
add = Net()
add(Tensor(x), Tensor(y))
for _ in range(3):
if not os.path.exists(dump_file_path):
time.sleep(2)
check_dump_structure(dump_path, dump_config_path, 1, 1, 1)
assert len(os.listdir(dump_file_path)) == 1
# check content of the generated dump data
if test_name == "test_async_dump_npy":
output_name = "Add.Add-op*.*.*.*.output.0.ND.npy"
output_path = glob.glob(os.path.join(dump_file_path, output_name))[0]
real_path = os.path.realpath(output_path)
output = np.load(real_path)
expect = np.array([[8, 10, 12], [14, 16, 18]], np.float32)
assert np.array_equal(output, expect)
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_async_dump_npy():
"""
Feature: async dump on Ascend
Description: test async dump with file_format = "npy"
Expectation: dump data are generated in npy file format
"""
run_async_dump("test_async_dump_npy")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_async_dump_bin():
"""
Feature: async dump on Ascend
Description: test async dump with file_format = "bin"
Expectation: dump data are generated in protobuf file format (suffixed with a timestamp)
"""
run_async_dump("test_async_dump_bin")
def run_overflow_dump(test_name):
"""Run async dump and generate overflow"""
if sys.platform != 'linux':
return
overflow_x = np.array([60000, 60000]).astype(np.float16)
with tempfile.TemporaryDirectory(dir='/tmp') as tmp_dir:
dump_path = os.path.join(tmp_dir, 'overflow_dump')
dump_config_path = os.path.join(tmp_dir, 'overflow_dump.json')
generate_dump_json_with_overflow(dump_path, dump_config_path, test_name, 3)
os.environ['MINDSPORE_DUMP_CONFIG'] = dump_config_path
if os.path.isdir(dump_path):
shutil.rmtree(dump_path)
add = Net()
add(Tensor(overflow_x), Tensor(overflow_x))
exe_graph_path = os.path.join(dump_path, 'rank_0', 'Net', '0', '0')
for _ in range(5):
if not os.path.exists(exe_graph_path):
time.sleep(2)
check_dump_structure(dump_path, dump_config_path, 1, 1, 1)
# check that the overflow dump generates exactly two files with the expected naming format
assert len(os.listdir(exe_graph_path)) == 2
output_path = glob.glob(os.path.join(exe_graph_path, "Add.Add-op*.*.*.*.output.0.ND.npy"))[0]
overflow_path = glob.glob(os.path.join(exe_graph_path, "Opdebug.Node_OpDebug.*.*.*.output.0.json"))[0]
assert output_path
assert overflow_path
# check content of the output tensor
real_path = os.path.realpath(output_path)
output = np.load(real_path)
expect = np.array([65504, 65504], np.float16)
assert np.array_equal(output, expect)
# check content of opdebug info json file
with open(overflow_path, 'rb') as json_file:
data = json.load(json_file)
assert data
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_ascend_overflow_dump():
"""
Feature: Overflow Dump
Description: Test overflow dump
Expectation: Overflow is occurred, and overflow dump file is in correct format
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
run_overflow_dump("test_async_dump_npy")
|
[] |
[] |
[
"MINDSPORE_DUMP_CONFIG"
] |
[]
|
["MINDSPORE_DUMP_CONFIG"]
|
python
| 1 | 0 | |
main.py
|
import requests
from pathlib import Path
import os
from dotenv import load_dotenv
import random
def get_file_extension(url):
return Path(url).suffix
def download_a_pic(url, filename):
response = requests.get(url)
response.raise_for_status()
with open(Path.cwd().joinpath(filename), 'wb') as image:
image.write(response.content)
def get_a_comic(number_of_comic):
url = f'http://xkcd.com/{number_of_comic}/info.0.json'
response = requests.get(url)
response.raise_for_status()
comic_pic = response.json()
pic_extension = get_file_extension(comic_pic['img'])
pic_filename = f"{comic_pic['title']}{pic_extension}"
download_a_pic(comic_pic['img'], pic_filename)
return pic_filename, comic_pic['alt']
def check_http_response_error(response):
if 'error' in response:
raise requests.exceptions.HTTPError(response['error'])
def get_upload_url(token, group_id):
vk_url = 'https://api.vk.com/method/photos.getWallUploadServer'
vk_params = {'access_token': token, 'v': '5.122', 'group_id': group_id}
response = requests.get(vk_url, params=vk_params)
upload_url = response.json()
check_http_response_error(upload_url)
return upload_url['response']['upload_url']
def post_a_comic(upload_url, token, message, pic_filename, group_id):
"""Put all three requests to the VK API in one
function because they can only be used together."""
with open(pic_filename, 'rb') as file:
files = {'photo': file}
response = requests.post(upload_url, files=files)
uploaded_pic = response.json()
check_http_response_error(uploaded_pic)
url = 'https://api.vk.com/method/photos.saveWallPhoto'
params = {
'server': uploaded_pic['server'],
'photo': uploaded_pic['photo'],
'hash': uploaded_pic['hash'],
'access_token': token,
'v': '5.122',
'group_id': group_id
}
response = requests.post(url, params=params)
saved_pic = response.json()
check_http_response_error(saved_pic)
saved_pic_ids = saved_pic['response'][0]
url = 'https://api.vk.com/method/wall.post'
attachments = f"photo{saved_pic_ids['owner_id']}_{saved_pic_ids['id']}"
params = {
'attachments': attachments,
'access_token': token,
'v': '5.122',
'owner_id': f'-{group_id}',
'from_group': '1',
'message': message
}
response = requests.post(url, params=params)
posted_pic = response.json()
check_http_response_error(posted_pic)
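# Despite its name, this returns a random comic number between 0 and the
# number of the most recent comic.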
def get_number_of_comics():
url = 'http://xkcd.com/info.0.json'
response = requests.get(url)
response.raise_for_status()
last_pic = response.json()
return random.randint(0, last_pic['num'])
def main():
load_dotenv()
vk_token = os.getenv('ACCESS_TOKEN')
group_id = os.getenv('GROUP_ID')
number_of_comic = get_number_of_comics()
pic_filename, message = get_a_comic(number_of_comic)
try:
upload_url = get_upload_url(vk_token, group_id)
post_a_comic(upload_url, vk_token, message, pic_filename, group_id)
finally:
Path.unlink(Path.cwd().joinpath(pic_filename))
if __name__ == '__main__':
main()
|
[] |
[] |
[
"GROUP_ID",
"ACCESS_TOKEN"
] |
[]
|
["GROUP_ID", "ACCESS_TOKEN"]
|
python
| 2 | 0 | |
cmd/searcher/search/search_structural_test.go
|
package search
import (
"context"
"encoding/json"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
"github.com/sourcegraph/sourcegraph/internal/comby"
"github.com/sourcegraph/sourcegraph/internal/search"
"github.com/sourcegraph/sourcegraph/internal/testutil"
)
func TestMatcherLookupByLanguage(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
input := map[string]string{
"file_without_extension": `
/* This foo(plain string) {} is in a Go comment should not match in Go, but should match in plaintext */
func foo(go string) {}
`,
}
cases := []struct {
Name string
Languages []string
Want []string
}{
{
Name: "Language test for no language",
Languages: []string{},
Want: []string{"foo(plain string)", "foo(go string)"},
},
{
Name: "Language test for Go",
Languages: []string{"go"},
Want: []string{"foo(go string)"},
},
{
Name: "Language test for plaintext",
Languages: []string{"text"},
Want: []string{"foo(plain string)", "foo(go string)"},
},
}
zipData, err := testutil.CreateZip(input)
if err != nil {
t.Fatal(err)
}
zf, cleanup, err := testutil.TempZipFileOnDisk(zipData)
if err != nil {
t.Fatal(err)
}
defer cleanup()
t.Run("group", func(t *testing.T) {
for _, tt := range cases {
tt := tt
t.Run(tt.Name, func(t *testing.T) {
t.Parallel()
p := &protocol.PatternInfo{
Pattern: "foo(:[args])",
IncludePatterns: []string{"file_without_extension"},
Languages: tt.Languages,
}
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), 100000000)
defer cancel()
err := structuralSearch(ctx, zf, Subset(p.IncludePatterns), "", p.Pattern, p.CombyRule, p.Languages, "repo_foo", sender)
if err != nil {
t.Fatal(err)
}
var got []string
for _, fileMatches := range sender.collected {
for _, m := range fileMatches.LineMatches {
got = append(got, m.Preview)
}
}
if !reflect.DeepEqual(got, tt.Want) {
t.Fatalf("got file matches %v, want %v", got, tt.Want)
}
})
}
})
}
func TestMatcherLookupByExtension(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
t.Parallel()
input := map[string]string{
"file_without_extension": `
/* This foo(plain.empty) {} is in a Go comment should not match in Go, but should match in plaintext */
func foo(go.empty) {}
`,
"file.go": `
/* This foo(plain.go) {} is in a Go comment should not match in Go, but should match in plaintext */
func foo(go.go) {}
`,
"file.txt": `
/* This foo(plain.txt) {} is in a Go comment should not match in Go, but should match in plaintext */
func foo(go.txt) {}
`,
}
zipData, err := testutil.CreateZip(input)
if err != nil {
t.Fatal(err)
}
zf, cleanup, err := testutil.TempZipFileOnDisk(zipData)
if err != nil {
t.Fatal(err)
}
defer cleanup()
test := func(language, filename string) string {
var languages []string
if language != "" {
languages = []string{language}
}
extensionHint := filepath.Ext(filename)
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), 1000000000)
defer cancel()
err := structuralSearch(ctx, zf, All, extensionHint, "foo(:[args])", "", languages, "repo_foo", sender)
if err != nil {
return "ERROR: " + err.Error()
}
var got []string
for _, fileMatches := range sender.collected {
for _, m := range fileMatches.LineMatches {
got = append(got, m.Preview)
}
}
sort.Strings(got)
return strings.Join(got, " ")
}
cases := []struct {
name string
want string
language string
filename string
}{{
name: "No language and no file extension => .generic matcher",
want: "foo(go.empty) foo(go.go) foo(go.txt) foo(plain.empty) foo(plain.go) foo(plain.txt)",
language: "",
filename: "file_without_extension",
}, {
name: "No language and .go file extension => .go matcher",
want: "foo(go.empty) foo(go.go) foo(go.txt)",
language: "",
filename: "a/b/c/file.go",
}, {
name: "Language Go and no file extension => .go matcher",
want: "foo(go.empty) foo(go.go) foo(go.txt)",
language: "go",
filename: "",
}, {
name: "Language .go and .txt file extension => .go matcher",
want: "foo(go.empty) foo(go.go) foo(go.txt)",
language: "go",
filename: "file.txt",
}}
t.Run("group", func(t *testing.T) {
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
got := test(tc.language, tc.filename)
if d := cmp.Diff(tc.want, got); d != "" {
t.Errorf("mismatch (-want +got):\n%s", d)
}
})
}
})
}
// Tests that structural search correctly infers the Go matcher from the .go
// file extension.
func TestInferredMatcher(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
input := map[string]string{
"main.go": `
/* This foo(ignore string) {} is in a Go comment should not match */
func foo(real string) {}
`,
}
pattern := "foo(:[args])"
want := "foo(real string)"
zipData, err := testutil.CreateZip(input)
if err != nil {
t.Fatal(err)
}
zPath, cleanup, err := testutil.TempZipFileOnDisk(zipData)
if err != nil {
t.Fatal(err)
}
defer cleanup()
zFile, err := testutil.MockZipFile(zipData)
if err != nil {
t.Fatal(err)
}
p := &protocol.PatternInfo{
Pattern: pattern,
Limit: 30,
}
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), 1000000000)
defer cancel()
err = filteredStructuralSearch(ctx, zPath, zFile, p, "foo", sender)
if err != nil {
t.Fatal(err)
}
got := sender.collected[0].LineMatches[0].Preview
if err != nil {
t.Fatal(err)
}
if got != want {
t.Fatalf("got file matches %v, want %v", got, want)
}
}
func TestRecordMetrics(t *testing.T) {
cases := []struct {
name string
language []string
includePatterns []string
want string
}{
{
name: "Empty values",
language: nil,
includePatterns: []string{},
want: ".generic",
},
{
name: "Include patterns no extension",
language: nil,
includePatterns: []string{"foo", "bar.go"},
want: ".generic",
},
{
name: "Include patterns first extension",
language: nil,
includePatterns: []string{"foo.c", "bar.go"},
want: ".c",
},
{
name: "Non-empty language",
language: []string{"xml"},
includePatterns: []string{"foo.c", "bar.go"},
want: ".xml",
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
var extensionHint string
if len(tt.includePatterns) > 0 {
filename := tt.includePatterns[0]
extensionHint = filepath.Ext(filename)
}
got := toMatcher(tt.language, extensionHint)
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Fatal(diff)
}
})
}
}
// Tests that includePatterns works. includePatterns serves a similar role in
// structural search as it does in regex search, but is interpreted _differently_.
// includePatterns cannot be a regex expression (as in traditional search), but
// instead (currently) expects a list of patterns that represent a set of file
// paths to search.
func TestIncludePatterns(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
input := map[string]string{
"/a/b/c": "",
"/a/b/c/foo.go": "",
"c/foo.go": "",
"bar.go": "",
"/x/y/z/bar.go": "",
"/a/b/c/nope.go": "",
"nope.go": "",
}
want := []string{
"/a/b/c/foo.go",
"/x/y/z/bar.go",
"bar.go",
}
includePatterns := []string{"a/b/c/foo.go", "bar.go"}
zipData, err := testutil.CreateZip(input)
if err != nil {
t.Fatal(err)
}
zf, cleanup, err := testutil.TempZipFileOnDisk(zipData)
if err != nil {
t.Fatal(err)
}
defer cleanup()
p := &protocol.PatternInfo{
Pattern: "",
IncludePatterns: includePatterns,
}
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), 1000000000)
defer cancel()
err = structuralSearch(ctx, zf, Subset(p.IncludePatterns), "", p.Pattern, p.CombyRule, p.Languages, "foo", sender)
if err != nil {
t.Fatal(err)
}
fileMatches := sender.collected
got := make([]string, len(fileMatches))
for i, fm := range fileMatches {
got[i] = fm.Path
}
sort.Strings(got)
if !reflect.DeepEqual(got, want) {
t.Fatalf("got file matches %v, want %v", got, want)
}
}
func TestRule(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
input := map[string]string{
"file.go": "func foo(success) {} func bar(fail) {}",
}
zipData, err := testutil.CreateZip(input)
if err != nil {
t.Fatal(err)
}
zf, cleanup, err := testutil.TempZipFileOnDisk(zipData)
if err != nil {
t.Fatal(err)
}
defer cleanup()
p := &protocol.PatternInfo{
Pattern: "func :[[fn]](:[args])",
IncludePatterns: []string{".go"},
CombyRule: `where :[args] == "success"`,
}
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), 1000000000)
defer cancel()
err = structuralSearch(ctx, zf, Subset(p.IncludePatterns), "", p.Pattern, p.CombyRule, p.Languages, "repo", sender)
if err != nil {
t.Fatal(err)
}
got := sender.collected
want := []protocol.FileMatch{
{
Path: "file.go",
LimitHit: false,
LineMatches: []protocol.LineMatch{
{
LineNumber: 0,
OffsetAndLengths: [][2]int{{0, 17}},
Preview: "func foo(success)",
},
},
MatchCount: 1,
},
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("got file matches %v, want %v", got, want)
}
}
func TestStructuralLimits(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
input := map[string]string{
"test1.go": `
func foo() {
fmt.Println("foo")
}
func bar() {
fmt.Println("bar")
}
`,
"test2.go": `
func foo() {
fmt.Println("foo")
}
func bar() {
fmt.Println("bar")
}
`,
}
zipData, err := testutil.CreateZip(input)
require.NoError(t, err)
zf, cleanup, err := testutil.TempZipFileOnDisk(zipData)
require.NoError(t, err)
defer cleanup()
count := func(matches []protocol.FileMatch) int {
c := 0
for _, match := range matches {
c += match.MatchCount
}
return c
}
test := func(limit, wantCount int, p *protocol.PatternInfo) func(t *testing.T) {
return func(t *testing.T) {
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), limit)
defer cancel()
err := structuralSearch(ctx, zf, Subset(p.IncludePatterns), "", p.Pattern, p.CombyRule, p.Languages, "repo_foo", sender)
require.NoError(t, err)
require.Equal(t, wantCount, count(sender.collected))
}
}
t.Run("unlimited", test(10000, 4, &protocol.PatternInfo{Pattern: "{:[body]}"}))
t.Run("exact limit", test(4, 4, &protocol.PatternInfo{Pattern: "{:[body]}"}))
t.Run("limited", test(2, 2, &protocol.PatternInfo{Pattern: "{:[body]}"}))
t.Run("many", test(12, 8, &protocol.PatternInfo{Pattern: "(:[_])"}))
}
func TestHighlightMultipleLines(t *testing.T) {
cases := []struct {
Name string
Match *comby.Match
Want []protocol.LineMatch
}{
{
Name: "Single line",
Match: &comby.Match{
Range: comby.Range{
Start: comby.Location{
Line: 1,
Column: 1,
},
End: comby.Location{
Line: 1,
Column: 2,
},
},
Matched: "this is a single line match",
},
Want: []protocol.LineMatch{
{
LineNumber: 0,
OffsetAndLengths: [][2]int{
{
0,
1,
},
},
Preview: "this is a single line match",
},
},
},
{
Name: "Three lines",
Match: &comby.Match{
Range: comby.Range{
Start: comby.Location{
Line: 1,
Column: 1,
},
End: comby.Location{
Line: 3,
Column: 5,
},
},
Matched: "this is a match across\nthree\nlines",
},
Want: []protocol.LineMatch{
{
LineNumber: 0,
OffsetAndLengths: [][2]int{
{
0,
22,
},
},
Preview: "this is a match across",
},
{
LineNumber: 1,
OffsetAndLengths: [][2]int{
{
0,
5,
},
},
Preview: "three",
},
{
LineNumber: 2,
OffsetAndLengths: [][2]int{
{
0,
4, // don't include trailing newline
},
},
Preview: "lines",
},
},
},
}
for _, tt := range cases {
t.Run(tt.Name, func(t *testing.T) {
got := highlightMultipleLines(tt.Match)
if !reflect.DeepEqual(got, tt.Want) {
jsonGot, _ := json.Marshal(got)
jsonWant, _ := json.Marshal(tt.Want)
t.Errorf("got: %s, want: %s", jsonGot, jsonWant)
}
})
}
}
func TestMatchCountForMultilineMatches(t *testing.T) {
// If we are not on CI skip the test.
if os.Getenv("CI") == "" {
t.Skip("Not on CI, skipping comby-dependent test")
}
input := map[string]string{
"main.go": `
func foo() {
fmt.Println("foo")
}
func bar() {
fmt.Println("bar")
}
`,
}
wantMatchCount := 2
p := &protocol.PatternInfo{Pattern: "{:[body]}"}
zipData, err := testutil.CreateZip(input)
if err != nil {
t.Fatal(err)
}
zf, cleanup, err := testutil.TempZipFileOnDisk(zipData)
if err != nil {
t.Fatal(err)
}
defer cleanup()
t.Run("Structural search match count", func(t *testing.T) {
ctx, cancel, sender := newLimitedStreamCollector(context.Background(), 1000000000)
defer cancel()
err := structuralSearch(ctx, zf, Subset(p.IncludePatterns), "", p.Pattern, p.CombyRule, p.Languages, "repo_foo", sender)
if err != nil {
t.Fatal(err)
}
matches := sender.collected
var gotMatchCount int
for _, fileMatches := range matches {
gotMatchCount += fileMatches.MatchCount
}
if gotMatchCount != wantMatchCount {
t.Fatalf("got match count %d, want %d", gotMatchCount, wantMatchCount)
}
})
}
func TestBuildQuery(t *testing.T) {
pattern := ":[x~*]"
want := "error parsing regexp: missing argument to repetition operator: `*`"
t.Run("build query", func(t *testing.T) {
_, err := buildQuery(&search.TextPatternInfo{Pattern: pattern}, nil, nil, false)
if diff := cmp.Diff(err.Error(), want); diff != "" {
t.Error(diff)
}
})
}
|
[
"\"CI\"",
"\"CI\"",
"\"CI\"",
"\"CI\"",
"\"CI\"",
"\"CI\"",
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
cf-go/app.go
|
package main
import (
appd "appdynamics"
"fmt"
"math/rand"
"net/http"
"os"
"strconv"
"time"
)
func helloWorld(w http.ResponseWriter, r *http.Request) {
bt := appd.StartBT("/", "")
time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond)
fmt.Fprintf(w, "Hello World")
appd.EndBT(bt)
}
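// main configures the AppDynamics Go SDK from environment variables (the
// APPD_* settings plus CF_INSTANCE_INDEX, which suggests a Cloud Foundry
// deployment) and serves a single instrumented hello-world endpoint on :8080.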
func main() {
cfg := appd.Config{}
cfg.AppName = os.Getenv("APPD_APPLICATION_NAME")
cfg.TierName = os.Getenv("APPD_TIER_NAME")
cfg.NodeName = os.Getenv("APPD_NODE_NAME") + ":" + os.Getenv("CF_INSTANCE_INDEX")
port, err := strconv.ParseInt(os.Getenv("APPD_CONTROLLER_PORT"), 10, 16)
if err != nil {
port = 8080
}
cfg.Controller.Host = os.Getenv("APPD_CONTROLLER_HOST")
cfg.Controller.Port = uint16(port)
cfg.Controller.Account = os.Getenv("APPD_ACCOUNT_NAME")
cfg.Controller.AccessKey = os.Getenv("APPD_ACCOUNT_ACCESS_KEY")
cfg.InitTimeoutMs = 1000
err = appd.InitSDK(&cfg)
if err != nil {
fmt.Println(err)
}
http.HandleFunc("/", helloWorld)
http.ListenAndServe(":8080", nil)
}
|
[
"\"APPD_APPLICATION_NAME\"",
"\"APPD_TIER_NAME\"",
"\"APPD_NODE_NAME\"",
"\"CF_INSTANCE_INDEX\"",
"\"APPD_CONTROLLER_PORT\"",
"\"APPD_CONTROLLER_HOST\"",
"\"APPD_ACCOUNT_NAME\"",
"\"APPD_ACCOUNT_ACCESS_KEY\""
] |
[] |
[
"APPD_TIER_NAME",
"CF_INSTANCE_INDEX",
"APPD_CONTROLLER_HOST",
"APPD_ACCOUNT_NAME",
"APPD_ACCOUNT_ACCESS_KEY",
"APPD_CONTROLLER_PORT",
"APPD_APPLICATION_NAME",
"APPD_NODE_NAME"
] |
[]
|
["APPD_TIER_NAME", "CF_INSTANCE_INDEX", "APPD_CONTROLLER_HOST", "APPD_ACCOUNT_NAME", "APPD_ACCOUNT_ACCESS_KEY", "APPD_CONTROLLER_PORT", "APPD_APPLICATION_NAME", "APPD_NODE_NAME"]
|
go
| 8 | 0 | |
Lib/site-packages/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_pytest2.py
|
from _pydev_runfiles import pydev_runfiles_xml_rpc
import pickle
import zlib
import base64
import os
from pydevd_file_utils import canonical_normalized_path
import pytest
import sys
import time
from pathlib import Path
#=========================================================================
# Load filters with tests we should skip
#=========================================================================
py_test_accept_filter = None
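# The skip filter arrives through the PYDEV_PYTEST_SKIP environment variable
# as a base64-encoded, zlib-compressed pickle mapping file paths to the test
# names that should be accepted.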
def _load_filters():
global py_test_accept_filter
if py_test_accept_filter is None:
py_test_accept_filter = os.environ.get('PYDEV_PYTEST_SKIP')
if py_test_accept_filter:
py_test_accept_filter = pickle.loads(
zlib.decompress(base64.b64decode(py_test_accept_filter)))
# Newer versions of pytest resolve symlinks, so, we
# may need to filter with a resolved path too.
new_dct = {}
for filename, value in py_test_accept_filter.items():
new_dct[canonical_normalized_path(str(Path(filename).resolve()))] = value
py_test_accept_filter.update(new_dct)
else:
py_test_accept_filter = {}
def is_in_xdist_node():
main_pid = os.environ.get('PYDEV_MAIN_PID')
if main_pid and main_pid != str(os.getpid()):
return True
return False
connected = False
def connect_to_server_for_communication_to_xml_rpc_on_xdist():
global connected
if connected:
return
connected = True
if is_in_xdist_node():
port = os.environ.get('PYDEV_PYTEST_SERVER')
if not port:
sys.stderr.write(
'Error: no PYDEV_PYTEST_SERVER environment variable defined.\n')
else:
pydev_runfiles_xml_rpc.initialize_server(int(port), daemon=True)
PY2 = sys.version_info[0] <= 2
PY3 = not PY2
class State:
start_time = time.time()
buf_err = None
buf_out = None
def start_redirect():
if State.buf_out is not None:
return
from _pydevd_bundle import pydevd_io
State.buf_err = pydevd_io.start_redirect(keep_original_redirection=True, std='stderr')
State.buf_out = pydevd_io.start_redirect(keep_original_redirection=True, std='stdout')
def get_curr_output():
buf_out = State.buf_out
buf_err = State.buf_err
return buf_out.getvalue() if buf_out is not None else '', buf_err.getvalue() if buf_err is not None else ''
def pytest_unconfigure():
if is_in_xdist_node():
return
# Only report that it finished when on the main node (we don't want to report
# the finish on each separate node).
pydev_runfiles_xml_rpc.notifyTestRunFinished(
'Finished in: %.2f secs.' % (time.time() - State.start_time,))
def pytest_collection_modifyitems(session, config, items):
# A note: in xdist, this is not called on the main process, only in the
# secondary nodes, so, we'll actually make the filter and report it multiple
# times.
connect_to_server_for_communication_to_xml_rpc_on_xdist()
_load_filters()
if not py_test_accept_filter:
pydev_runfiles_xml_rpc.notifyTestsCollected(len(items))
return # Keep on going (nothing to filter)
new_items = []
for item in items:
f = canonical_normalized_path(str(item.parent.fspath))
name = item.name
if f not in py_test_accept_filter:
# print('Skip file: %s' % (f,))
continue # Skip the file
i = name.find('[')
name_without_parametrize = None
if i > 0:
name_without_parametrize = name[:i]
accept_tests = py_test_accept_filter[f]
if item.cls is not None:
class_name = item.cls.__name__
else:
class_name = None
for test in accept_tests:
if test == name:
# Direct match of the test (just go on with the default
# loading)
new_items.append(item)
break
if name_without_parametrize is not None and test == name_without_parametrize:
# This happens when parameterizing pytest tests on older versions
# of pytest where the test name doesn't include the fixture name
# in it.
new_items.append(item)
break
if class_name is not None:
if test == class_name + '.' + name:
new_items.append(item)
break
if name_without_parametrize is not None and test == class_name + '.' + name_without_parametrize:
new_items.append(item)
break
if class_name == test:
new_items.append(item)
break
else:
pass
# print('Skip test: %s.%s. Accept: %s' % (class_name, name, accept_tests))
# Modify the original list
items[:] = new_items
pydev_runfiles_xml_rpc.notifyTestsCollected(len(items))
try:
"""
pytest > 5.4 uses its own version of TerminalWriter based on py.io.TerminalWriter
and assumes there is a specific method TerminalWriter._write_source,
so try to load the pytest version first and fall back to the default one.
"""
from _pytest._io import TerminalWriter
except ImportError:
from py.io import TerminalWriter
def _get_error_contents_from_report(report):
if report.longrepr is not None:
try:
tw = TerminalWriter(stringio=True)
stringio = tw.stringio
except TypeError:
import io
stringio = io.StringIO()
tw = TerminalWriter(file=stringio)
tw.hasmarkup = False
report.toterminal(tw)
exc = stringio.getvalue()
s = exc.strip()
if s:
return s
return ''
def pytest_collectreport(report):
error_contents = _get_error_contents_from_report(report)
if error_contents:
report_test('fail', '<collect errors>', '<collect errors>', '', error_contents, 0.0)
def append_strings(s1, s2):
if s1.__class__ == s2.__class__:
return s1 + s2
# Prefer str
if isinstance(s1, bytes):
s1 = s1.decode('utf-8', 'replace')
if isinstance(s2, bytes):
s2 = s2.decode('utf-8', 'replace')
return s1 + s2
def pytest_runtest_logreport(report):
if is_in_xdist_node():
# When running with xdist, we don't want the report to be called from the node, only
# from the main process.
return
report_duration = report.duration
report_when = report.when
report_outcome = report.outcome
if hasattr(report, 'wasxfail'):
if report_outcome != 'skipped':
report_outcome = 'passed'
if report_outcome == 'passed':
# passed on setup/teardown: no need to report if in setup or teardown
# (only on the actual test if it passed).
if report_when in ('setup', 'teardown'):
return
status = 'ok'
elif report_outcome == 'skipped':
status = 'skip'
else:
# The outcome can only be passed, skipped or failed (never error), so treat
# a failure outside the call phase as an error.
if report_when in ('setup', 'teardown'):
status = 'error'
else:
# any error in the call (not in setup or teardown) is considered a
# regular failure.
status = 'fail'
# This will work if pytest is not capturing output; if it is, nothing will
# come from here...
captured_output, error_contents = getattr(report, 'pydev_captured_output', ''), getattr(report, 'pydev_error_contents', '')
for type_section, value in report.sections:
if value:
if type_section in ('err', 'stderr', 'Captured stderr call'):
error_contents = append_strings(error_contents, value)
else:
captured_output = append_strings(captured_output, value)
filename = getattr(report, 'pydev_fspath_strpath', '<unable to get>')
test = report.location[2]
if report_outcome != 'skipped':
# On skipped, we'll have a traceback for the skip, which is not what we
# want.
exc = _get_error_contents_from_report(report)
if exc:
if error_contents:
error_contents = append_strings(error_contents, '----------------------------- Exceptions -----------------------------\n')
error_contents = append_strings(error_contents, exc)
report_test(status, filename, test, captured_output, error_contents, report_duration)
def report_test(status, filename, test, captured_output, error_contents, duration):
'''
@param filename: 'D:\\src\\mod1\\hello.py'
@param test: 'TestCase.testMet1'
@param status: fail, error, ok
'''
time_str = '%.2f' % (duration,)
pydev_runfiles_xml_rpc.notifyTest(
status, captured_output, error_contents, filename, test, time_str)
if not hasattr(pytest, 'hookimpl'):
raise AssertionError('Please upgrade pytest (the current version of pytest: %s is unsupported)' % (pytest.__version__,))
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
report.pydev_fspath_strpath = item.fspath.strpath
report.pydev_captured_output, report.pydev_error_contents = get_curr_output()
@pytest.mark.tryfirst
def pytest_runtest_setup(item):
'''
Note: with xdist will be on a secondary process.
'''
# We have our own redirection: if xdist does its redirection, we'll have
# nothing in our contents (which is OK), but if it does, we'll get nothing
# from pytest but will get our own here.
start_redirect()
filename = item.fspath.strpath
test = item.location[2]
pydev_runfiles_xml_rpc.notifyStartTest(filename, test)
|
[] |
[] |
[
"PYDEV_PYTEST_SERVER",
"PYDEV_MAIN_PID",
"PYDEV_PYTEST_SKIP"
] |
[]
|
["PYDEV_PYTEST_SERVER", "PYDEV_MAIN_PID", "PYDEV_PYTEST_SKIP"]
|
python
| 3 | 0 | |
plan9/client/fid.go
|
package client
import (
"io"
"os"
"strings"
"sync"
"9fans.net/go/plan9"
)
func getuser() string { return os.Getenv("USER") }
type Fid struct {
c *Conn
qid plan9.Qid
fid uint32
mode uint8
offset int64
f sync.Mutex
}
func (fid *Fid) Close() error {
if fid == nil {
return nil
}
tx := &plan9.Fcall{Type: plan9.Tclunk, Fid: fid.fid}
_, err := fid.c.rpc(tx)
fid.c.putfid(fid)
return err
}
func (fid *Fid) Create(name string, mode uint8, perm plan9.Perm) error {
tx := &plan9.Fcall{Type: plan9.Tcreate, Fid: fid.fid, Name: name, Mode: mode, Perm: perm}
rx, err := fid.c.rpc(tx)
if err != nil {
return err
}
fid.mode = mode
fid.qid = rx.Qid
return nil
}
func (fid *Fid) Dirread() ([]*plan9.Dir, error) {
buf := make([]byte, plan9.STATMAX)
n, err := fid.Read(buf)
if err != nil {
return nil, err
}
return dirUnpack(buf[0:n])
}
func (fid *Fid) Dirreadall() ([]*plan9.Dir, error) {
// Note: Cannot use ioutil.ReadAll / io.ReadAll here
// because it assumes it can read small amounts.
// Plan 9 requires providing a buffer big enough for
// at least a single directory entry.
var dirs []*plan9.Dir
for {
d, err := fid.Dirread()
dirs = append(dirs, d...)
if err != nil {
if err == io.EOF {
err = nil
}
return dirs, err
}
}
}
func dirUnpack(b []byte) ([]*plan9.Dir, error) {
var err error
dirs := make([]*plan9.Dir, 0, 10)
for len(b) > 0 {
if len(b) < 2 {
err = io.ErrUnexpectedEOF
break
}
n := int(b[0]) | int(b[1])<<8
if len(b) < n+2 {
err = io.ErrUnexpectedEOF
break
}
var d *plan9.Dir
d, err = plan9.UnmarshalDir(b[0 : n+2])
if err != nil {
break
}
b = b[n+2:]
if len(dirs) >= cap(dirs) {
ndirs := make([]*plan9.Dir, len(dirs), 2*cap(dirs))
copy(ndirs, dirs)
dirs = ndirs
}
n = len(dirs)
dirs = dirs[0 : n+1]
dirs[n] = d
}
return dirs, err
}
func (fid *Fid) Open(mode uint8) error {
tx := &plan9.Fcall{Type: plan9.Topen, Fid: fid.fid, Mode: mode}
_, err := fid.c.rpc(tx)
if err != nil {
return err
}
fid.mode = mode
return nil
}
func (fid *Fid) Qid() plan9.Qid {
return fid.qid
}
func (fid *Fid) Read(b []byte) (n int, err error) {
return fid.readAt(b, -1)
}
func (fid *Fid) ReadAt(b []byte, offset int64) (n int, err error) {
for len(b) > 0 {
m, err := fid.readAt(b, offset)
if err != nil {
return n, err
}
n += m
b = b[m:]
if offset != -1 {
offset += int64(m)
}
}
return n, nil
}
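// readAt issues a single Tread capped at the connection msize minus the 9P
// I/O header size; an offset of -1 means use (and then advance) the fid's
// current offset.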
func (fid *Fid) readAt(b []byte, offset int64) (n int, err error) {
msize := fid.c.msize - plan9.IOHDRSZ
n = len(b)
if uint32(n) > msize {
n = int(msize)
}
o := offset
if o == -1 {
fid.f.Lock()
o = fid.offset
fid.f.Unlock()
}
tx := &plan9.Fcall{Type: plan9.Tread, Fid: fid.fid, Offset: uint64(o), Count: uint32(n)}
rx, err := fid.c.rpc(tx)
if err != nil {
return 0, err
}
if len(rx.Data) == 0 {
return 0, io.EOF
}
copy(b, rx.Data)
if offset == -1 {
fid.f.Lock()
fid.offset += int64(len(rx.Data))
fid.f.Unlock()
}
return len(rx.Data), nil
}
func (fid *Fid) ReadFull(b []byte) (n int, err error) {
return io.ReadFull(fid, b)
}
func (fid *Fid) Remove() error {
tx := &plan9.Fcall{Type: plan9.Tremove, Fid: fid.fid}
_, err := fid.c.rpc(tx)
fid.c.putfid(fid)
return err
}
func (fid *Fid) Seek(n int64, whence int) (int64, error) {
switch whence {
case 0:
fid.f.Lock()
fid.offset = n
fid.f.Unlock()
case 1:
fid.f.Lock()
n += fid.offset
if n < 0 {
fid.f.Unlock()
return 0, Error("negative offset")
}
fid.offset = n
fid.f.Unlock()
case 2:
d, err := fid.Stat()
if err != nil {
return 0, err
}
n += int64(d.Length)
if n < 0 {
return 0, Error("negative offset")
}
fid.f.Lock()
fid.offset = n
fid.f.Unlock()
default:
return 0, Error("bad whence in seek")
}
return n, nil
}
func (fid *Fid) Stat() (*plan9.Dir, error) {
tx := &plan9.Fcall{Type: plan9.Tstat, Fid: fid.fid}
rx, err := fid.c.rpc(tx)
if err != nil {
return nil, err
}
return plan9.UnmarshalDir(rx.Stat)
}
// TODO(rsc): Could use ...string instead?
func (fid *Fid) Walk(name string) (*Fid, error) {
wfid, err := fid.c.newfid()
if err != nil {
return nil, err
}
// Split, delete empty strings and dot.
elem := strings.Split(name, "/")
j := 0
for _, e := range elem {
if e != "" && e != "." {
elem[j] = e
j++
}
}
elem = elem[0:j]
for nwalk := 0; ; nwalk++ {
n := len(elem)
if n > plan9.MAXWELEM {
n = plan9.MAXWELEM
}
tx := &plan9.Fcall{Type: plan9.Twalk, Newfid: wfid.fid, Wname: elem[0:n]}
if nwalk == 0 {
tx.Fid = fid.fid
} else {
tx.Fid = wfid.fid
}
rx, err := fid.c.rpc(tx)
if err == nil && len(rx.Wqid) != n {
err = Error("file '" + name + "' not found")
}
if err != nil {
if nwalk > 0 {
wfid.Close()
} else {
fid.c.putfid(wfid)
}
return nil, err
}
if n == 0 {
wfid.qid = fid.qid
} else {
wfid.qid = rx.Wqid[n-1]
}
elem = elem[n:]
if len(elem) == 0 {
break
}
}
return wfid, nil
}
func (fid *Fid) Write(b []byte) (n int, err error) {
return fid.WriteAt(b, -1)
}
func (fid *Fid) WriteAt(b []byte, offset int64) (n int, err error) {
msize := fid.c.msize - plan9.IOHDRSIZE
tot := 0
n = len(b)
first := true
for tot < n || first {
want := n - tot
if uint32(want) > msize {
want = int(msize)
}
got, err := fid.writeAt(b[tot:tot+want], offset)
tot += got
if err != nil {
return tot, err
}
if offset != -1 {
offset += int64(got)
}
first = false
}
return tot, nil
}
func (fid *Fid) writeAt(b []byte, offset int64) (n int, err error) {
o := offset
if o == -1 {
fid.f.Lock()
o = fid.offset
fid.f.Unlock()
}
tx := &plan9.Fcall{Type: plan9.Twrite, Fid: fid.fid, Offset: uint64(o), Data: b}
rx, err := fid.c.rpc(tx)
if err != nil {
return 0, err
}
if offset == -1 && rx.Count > 0 {
fid.f.Lock()
fid.offset += int64(rx.Count)
fid.f.Unlock()
}
return int(rx.Count), nil
}
func (fid *Fid) Wstat(d *plan9.Dir) error {
b, err := d.Bytes()
if err != nil {
return err
}
tx := &plan9.Fcall{Type: plan9.Twstat, Fid: fid.fid, Stat: b}
_, err = fid.c.rpc(tx)
return err
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
runsc/test/testutil/testutil.go
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testutil contains utility functions for runsc tests.
package testutil
import (
"bufio"
"context"
"encoding/base32"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/specutils"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// RaceEnabled is set to true if it was built with '--race' option.
var RaceEnabled = false
// TmpDir returns the absolute path to a writable directory that can be used as
// scratch by the test.
func TmpDir() string {
dir := os.Getenv("TEST_TMPDIR")
if dir == "" {
dir = "/tmp"
}
return dir
}
// ConfigureExePath configures the executable for runsc in the test environment.
func ConfigureExePath() error {
path, err := FindFile("runsc/runsc")
if err != nil {
return err
}
specutils.ExePath = path
return nil
}
// FindFile searches for a file inside the test run environment. It returns the
// full path to the file. It fails if none or more than one file is found.
func FindFile(path string) (string, error) {
wd, err := os.Getwd()
if err != nil {
return "", err
}
// The test root is demarcated by a path element called "__main__". Search for
// it backwards from the working directory.
root := wd
for {
dir, name := filepath.Split(root)
if name == "__main__" {
break
}
if len(dir) == 0 {
return "", fmt.Errorf("directory __main__ not found in %q", wd)
}
// Remove ending slash to loop around.
root = dir[:len(dir)-1]
}
// Annoyingly, bazel adds the build type to the directory path for go
// binaries, but not for c++ binaries. We use two different patterns
// to find our file.
patterns := []string{
// Try the obvious path first.
filepath.Join(root, path),
// If it was a go binary, use a wildcard to match the build
// type. The pattern is: /test-path/__main__/directories/*/file.
filepath.Join(root, filepath.Dir(path), "*", filepath.Base(path)),
}
for _, p := range patterns {
matches, err := filepath.Glob(p)
if err != nil {
// "The only possible returned error is ErrBadPattern,
// when pattern is malformed." -godoc
return "", fmt.Errorf("error globbing %q: %v", p, err)
}
switch len(matches) {
case 0:
// Try the next pattern.
case 1:
// We found it.
return matches[0], nil
default:
return "", fmt.Errorf("more than one match found for %q: %s", path, matches)
}
}
return "", fmt.Errorf("file %q not found", path)
}
// TestConfig returns the default configuration to use in tests. Note that
// 'RootDir' must be set by caller if required.
func TestConfig() *boot.Config {
return &boot.Config{
Debug: true,
LogFormat: "text",
DebugLogFormat: "text",
AlsoLogToStderr: true,
LogPackets: true,
Network: boot.NetworkNone,
Strace: true,
Platform: "ptrace",
FileAccess: boot.FileAccessExclusive,
TestOnlyAllowRunAsCurrentUserWithoutChroot: true,
NumNetworkChannels: 1,
}
}
// TestConfigWithRoot returns the default configuration to use in tests.
func TestConfigWithRoot(rootDir string) *boot.Config {
conf := TestConfig()
conf.RootDir = rootDir
return conf
}
// NewSpecWithArgs creates a simple spec with the given args suitable for use
// in tests.
func NewSpecWithArgs(args ...string) *specs.Spec {
return &specs.Spec{
// The host filesystem root is the container root.
Root: &specs.Root{
Path: "/",
Readonly: true,
},
Process: &specs.Process{
Args: args,
Env: []string{
"PATH=" + os.Getenv("PATH"),
},
Capabilities: specutils.AllCapabilities(),
},
Mounts: []specs.Mount{
// Root is readonly, but many tests want to write to tmpdir.
// This creates a writable mount inside the root. Also, when tmpdir points
// to "/tmp", it causes the actual /tmp to be mounted instead of a tmpfs
// inside the sentry.
{
Type: "bind",
Destination: TmpDir(),
Source: TmpDir(),
},
},
Hostname: "runsc-test-hostname",
}
}
// SetupRootDir creates a root directory for containers.
func SetupRootDir() (string, error) {
rootDir, err := ioutil.TempDir(TmpDir(), "containers")
if err != nil {
return "", fmt.Errorf("error creating root dir: %v", err)
}
return rootDir, nil
}
// SetupContainer creates a bundle and root dir for the container, generates a
// test config, and writes the spec to config.json in the bundle dir.
func SetupContainer(spec *specs.Spec, conf *boot.Config) (rootDir, bundleDir string, err error) {
// Setup root dir if one hasn't been provided.
if len(conf.RootDir) == 0 {
rootDir, err = SetupRootDir()
if err != nil {
return "", "", err
}
conf.RootDir = rootDir
}
bundleDir, err = SetupBundleDir(spec)
return rootDir, bundleDir, err
}
// SetupBundleDir creates a bundle dir and writes the spec to config.json.
func SetupBundleDir(spec *specs.Spec) (bundleDir string, err error) {
bundleDir, err = ioutil.TempDir(TmpDir(), "bundle")
if err != nil {
return "", fmt.Errorf("error creating bundle dir: %v", err)
}
if err = writeSpec(bundleDir, spec); err != nil {
return "", fmt.Errorf("error writing spec: %v", err)
}
return bundleDir, nil
}
// writeSpec writes the spec to disk in the given directory.
func writeSpec(dir string, spec *specs.Spec) error {
b, err := json.Marshal(spec)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(dir, "config.json"), b, 0755)
}
// UniqueContainerID generates a unique container id for each test.
//
// The container id is used to create an abstract unix domain socket, which must
// be unique. While the container forbids creating two containers with the same
// name, sometimes between test runs the socket does not get cleaned up quickly
// enough, causing container creation to fail.
func UniqueContainerID() string {
// Read 20 random bytes.
b := make([]byte, 20)
// "[Read] always returns len(p) and a nil error." --godoc
if _, err := rand.Read(b); err != nil {
panic("rand.Read failed: " + err.Error())
}
// base32 encode the random bytes, so that the name is a valid
// container id and can be used as a socket name in the filesystem.
return fmt.Sprintf("test-container-%s", base32.StdEncoding.EncodeToString(b))
}
// Copy copies file from src to dst.
func Copy(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
return err
}
// Poll is a shorthand function to poll for something with given timeout.
func Poll(cb func() error, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
return backoff.Retry(cb, b)
}
// WaitForHTTP tries GET requests on a port until the call succeeds or timeout.
func WaitForHTTP(port int, timeout time.Duration) error {
cb := func() error {
c := &http.Client{
// Calculate timeout to be able to do minimum 5 attempts.
Timeout: timeout / 5,
}
url := fmt.Sprintf("http://localhost:%d/", port)
resp, err := c.Get(url)
if err != nil {
log.Printf("Waiting %s: %v", url, err)
return err
}
resp.Body.Close()
return nil
}
return Poll(cb, timeout)
}
// Reaper reaps child processes.
type Reaper struct {
// mu protects ch, which will be nil if the reaper is not running.
mu sync.Mutex
ch chan os.Signal
}
// Start starts reaping child processes.
func (r *Reaper) Start() {
r.mu.Lock()
defer r.mu.Unlock()
if r.ch != nil {
panic("reaper.Start called on a running reaper")
}
r.ch = make(chan os.Signal, 1)
signal.Notify(r.ch, syscall.SIGCHLD)
go func() {
for {
r.mu.Lock()
ch := r.ch
r.mu.Unlock()
if ch == nil {
return
}
_, ok := <-ch
if !ok {
// Channel closed.
return
}
for {
cpid, _ := syscall.Wait4(-1, nil, syscall.WNOHANG, nil)
if cpid < 1 {
break
}
}
}
}()
}
// Stop stops reaping child processes.
func (r *Reaper) Stop() {
r.mu.Lock()
defer r.mu.Unlock()
if r.ch == nil {
panic("reaper.Stop called on a stopped reaper")
}
signal.Stop(r.ch)
close(r.ch)
r.ch = nil
}
// StartReaper is a helper that starts a new Reaper and returns a function to
// stop it.
func StartReaper() func() {
r := &Reaper{}
r.Start()
return r.Stop
}
// WaitUntilRead reads from the given reader until the wanted string is found
// or until timeout.
func WaitUntilRead(r io.Reader, want string, split bufio.SplitFunc, timeout time.Duration) error {
sc := bufio.NewScanner(r)
if split != nil {
sc.Split(split)
}
// done must be accessed atomically. A value greater than 0 indicates
// that the read loop can exit.
var done uint32
doneCh := make(chan struct{})
go func() {
for sc.Scan() {
t := sc.Text()
if strings.Contains(t, want) {
atomic.StoreUint32(&done, 1)
close(doneCh)
break
}
if atomic.LoadUint32(&done) > 0 {
break
}
}
}()
select {
case <-time.After(timeout):
atomic.StoreUint32(&done, 1)
return fmt.Errorf("timeout waiting to read %q", want)
case <-doneCh:
return nil
}
}
// KillCommand kills the process running cmd unless it hasn't been started. It
// returns an error if it cannot kill the process unless the reason is that the
// process has already exited.
func KillCommand(cmd *exec.Cmd) error {
if cmd.Process == nil {
return nil
}
if err := cmd.Process.Kill(); err != nil {
if !strings.Contains(err.Error(), "process already finished") {
return fmt.Errorf("failed to kill process %v: %v", cmd, err)
}
}
return nil
}
// WriteTmpFile writes text to a temporary file, closes the file, and returns
// the name of the file.
func WriteTmpFile(pattern, text string) (string, error) {
file, err := ioutil.TempFile(TmpDir(), pattern)
if err != nil {
return "", err
}
defer file.Close()
if _, err := file.Write([]byte(text)); err != nil {
return "", err
}
return file.Name(), nil
}
// RandomName creates a name with a 6-digit random number appended to it.
func RandomName(prefix string) string {
return fmt.Sprintf("%s-%06d", prefix, rand.Int31n(1000000))
}
|
[
"\"TEST_TMPDIR\"",
"\"PATH\""
] |
[] |
[
"TEST_TMPDIR",
"PATH"
] |
[]
|
["TEST_TMPDIR", "PATH"]
|
go
| 2 | 0 | |
kivy/input/providers/probesysfs.py
|
'''
Auto Create Input Provider Config Entry for Available MT Hardware (linux only).
===============================================================================
Thanks to Marc Tardif for the probing code, taken from scan-for-mt-device.
The device discovery is done by this provider. However, the reading of
input can be performed by other providers like: hidinput, mtdev and
linuxwacom. mtdev is used prior to other providers. For more
information about mtdev, check :py:class:`~kivy.input.providers.mtdev`.
Here is an example of auto creation::
[input]
# using mtdev
device_%(name)s = probesysfs,provider=mtdev
# using hidinput
device_%(name)s = probesysfs,provider=hidinput
# using mtdev with a match on name
device_%(name)s = probesysfs,provider=mtdev,match=acer
# using hidinput with custom parameters to hidinput (all on one line)
%(name)s = probesysfs,
provider=hidinput,param=min_pressure=1,param=max_pressure=99
# you can also match your wacom touchscreen
touch = probesysfs,match=E3 Finger,provider=linuxwacom,
select_all=1,param=mode=touch
# and your wacom pen
pen = probesysfs,match=E3 Pen,provider=linuxwacom,
select_all=1,param=mode=pen
By default, ProbeSysfs module will enumerate hardware from the /sys/class/input
device, and configure hardware with ABS_MT_POSITION_X capability. But for
example, the wacom screen doesn't support this capability. You can prevent this
behavior by putting select_all=1 in your config line.
'''
__all__ = ('ProbeSysfsHardwareProbe', )
import os
from os.path import sep
if 'KIVY_DOC' in os.environ:
ProbeSysfsHardwareProbe = None
else:
from re import match, IGNORECASE
from glob import glob
from subprocess import Popen, PIPE
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.providers.mouse import MouseMotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.config import _is_rpi
EventLoop = None
# See linux/input.h
ABS_MT_POSITION_X = 0x35
_cache_input = None
_cache_xinput = None
class Input(object):
def __init__(self, path):
query_xinput()
self.path = path
@property
def device(self):
base = os.path.basename(self.path)
return os.path.join("/dev", "input", base)
@property
def name(self):
path = os.path.join(self.path, "device", "name")
return read_line(path)
def get_capabilities(self):
path = os.path.join(self.path, "device", "capabilities", "abs")
line = read_line(path)
capabilities = []
long_bit = getconf("LONG_BIT")
for i, word in enumerate(line.split(" ")):
word = int(word, 16)
subcapabilities = [bool(word & 1 << i)
for i in range(long_bit)]
capabilities[:0] = subcapabilities
return capabilities
def has_capability(self, capability):
capabilities = self.get_capabilities()
return len(capabilities) > capability and capabilities[capability]
@property
def is_mouse(self):
return self.device in _cache_xinput
def getout(*args):
try:
return Popen(args, stdout=PIPE).communicate()[0]
except OSError:
return ''
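# getconf("LONG_BIT") reports the native word size, which is needed to decode
# the hexadecimal capability bitmask read from sysfs.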
def getconf(var):
output = getout("getconf", var)
return int(output)
def query_xinput():
global _cache_xinput
if _cache_xinput is None:
_cache_xinput = []
devids = getout('xinput', '--list', '--id-only')
for did in devids.splitlines():
devprops = getout('xinput', '--list-props', did)
evpath = None
for prop in devprops.splitlines():
prop = prop.strip()
if (prop.startswith(b'Device Enabled') and
prop.endswith(b'0')):
evpath = None
break
if prop.startswith(b'Device Node'):
try:
evpath = prop.split('"')[1]
except Exception:
evpath = None
if evpath:
_cache_xinput.append(evpath)
def get_inputs(path):
global _cache_input
if _cache_input is None:
event_glob = os.path.join(path, "event*")
_cache_input = [Input(x) for x in glob(event_glob)]
return _cache_input
def read_line(path):
f = open(path)
try:
return f.readline().strip()
finally:
f.close()
class ProbeSysfsHardwareProbe(MotionEventProvider):
def __new__(self, device, args):
# hack to not return an instance of this provider.
# :)
instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
instance.__init__(device, args)
def __init__(self, device, args):
super(ProbeSysfsHardwareProbe, self).__init__(device, args)
self.provider = 'mtdev'
self.match = None
self.input_path = '/sys/class/input'
self.select_all = True if _is_rpi else False
self.use_regex = False
self.args = []
args = args.split(',')
for arg in args:
if arg == '':
continue
arg = arg.split('=', 1)
# ensure it's a key = value
if len(arg) != 2:
Logger.error('ProbeSysfs: invalid parameters %s, not'
' key=value format' % arg)
continue
key, value = arg
if key == 'match':
self.match = value
elif key == 'provider':
self.provider = value
elif key == 'use_regex':
self.use_regex = bool(value)
elif key == 'select_all':
self.select_all = bool(value)
elif key == 'param':
self.args.append(value)
else:
Logger.error('ProbeSysfs: unknown %s option' % key)
continue
self.probe()
def should_use_mouse(self):
return not any(p for p in EventLoop.input_providers
if isinstance(p, MouseMotionEventProvider))
def probe(self):
global EventLoop
from kivy.base import EventLoop
inputs = get_inputs(self.input_path)
Logger.debug('ProbeSysfs: using probesysfs!')
use_mouse = self.should_use_mouse()
if not self.select_all:
inputs = [x for x in inputs if
x.has_capability(ABS_MT_POSITION_X)
and (use_mouse or not x.is_mouse)]
for device in inputs:
Logger.debug('ProbeSysfs: found device: %s at %s' % (
device.name, device.device))
# must ignore ?
if self.match:
if self.use_regex:
if not match(self.match, device.name, IGNORECASE):
Logger.debug('ProbeSysfs: device does not match the'
' rule in config, ignoring.')
continue
else:
if self.match not in device.name:
continue
Logger.info('ProbeSysfs: device match: %s' % device.device)
d = device.device
devicename = self.device % dict(name=d.split(sep)[-1])
provider = MotionEventFactory.get(self.provider)
if provider is None:
Logger.info('ProbeSysfs: unable to find provider %s' %
self.provider)
Logger.info('ProbeSysfs: fallback on hidinput')
provider = MotionEventFactory.get('hidinput')
if provider is None:
Logger.critical('ProbeSysfs: no input provider found'
' to handle this device!')
continue
instance = provider(devicename, '%s,%s' % (
device.device, ','.join(self.args)))
if instance:
EventLoop.add_input_provider(instance)
MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sample_program_6_5_2.py
|
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
y_name = 'boiling_point'
sdf = Chem.SDMolSupplier('boiling_point.sdf') # load the SDF file
# collect the names of the descriptors to be calculated
descriptor_names = []
for descriptor_information in Descriptors.descList:
descriptor_names.append(descriptor_information[0])
print('number of descriptors to calculate :', len(descriptor_names))
# calculate the descriptors
descriptor_calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptor_names)
# for each molecule, append the property value to the list y, the calculated descriptor values to descriptors, and the SMILES string to smiles
descriptors, y, smiles = [], [], []
print('number of molecules :', len(sdf))
for index, molecule in enumerate(sdf):
print(index + 1, '/', len(sdf))
y.append(float(molecule.GetProp(y_name)))
descriptors.append(descriptor_calculator.CalcDescriptors(molecule))
smiles.append(Chem.MolToSmiles(molecule))
descriptors = pd.DataFrame(descriptors, index=smiles, columns=descriptor_names)
y = pd.DataFrame(y, index=smiles, columns=[y_name])
# save the results
descriptors_with_y = pd.concat([y, descriptors], axis=1) # concatenate y with the descriptors
descriptors_with_y.to_csv('descriptors_with_y.csv') # save to a csv file; note that an existing file with the same name will be overwritten
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
pkg/alicloud/alicloud.go
|
/*
Copyright © 2019 Allan Hung <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alicloud
import (
"fmt"
"net/http"
"os"
"sync"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/allanhung/ali-ecs-tag-update/pkg/log"
"github.com/denverdino/aliyungo/metadata"
)
type AliCloudConfig struct {
RegionID string `json:"regionId" yaml:"regionId"`
AccessKeyID string `json:"accessKeyId" yaml:"accessKeyId"`
AccessKeySecret string `json:"accessKeySecret" yaml:"accessKeySecret"`
VPCID string `json:"vpcId" yaml:"vpcId"`
RoleName string `json:"-" yaml:"-"` // For ECS RAM role only
StsToken string `json:"-" yaml:"-"`
ExpireTime time.Time `json:"-" yaml:"-"`
}
type AliClient struct {
RegionID string
EcsClient ecs.Client
clientLock sync.RWMutex
nextExpire time.Time
}
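// GetCloudConfig fills the configuration from the ALICLOUD_* environment variables or,
// when they are not all set, from the ECS instance metadata service (RAM role with STS token).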
func (a *AliCloudConfig) GetCloudConfig() error {
roleName := ""
if os.Getenv("ALICLOUD_REGION") == "" ||
os.Getenv("ALICLOUD_ACCESS_KEY") == "" ||
os.Getenv("ALICLOUD_SECRET_KEY") == "" {
httpClient := &http.Client{
Timeout: 3 * time.Second,
}
// Load config from Metadata Service
m := metadata.NewMetaData(httpClient)
roleName, err := m.RoleName()
if err != nil {
return fmt.Errorf("failed to get role name from Metadata Service: %v", err)
}
vpcID, err := m.VpcID()
if err != nil {
return fmt.Errorf("failed to get VPC ID from Metadata Service: %v", err)
}
regionID, err := m.Region()
if err != nil {
return fmt.Errorf("failed to get Region ID from Metadata Service: %v", err)
}
role, err := m.RamRoleToken(roleName)
if err != nil {
return fmt.Errorf("failed to get STS Token from Metadata Service: %v", err)
}
a.RegionID = regionID
a.RoleName = roleName
a.VPCID = vpcID
a.AccessKeyID = role.AccessKeyId
a.AccessKeySecret = role.AccessKeySecret
a.StsToken = role.SecurityToken
a.ExpireTime = role.Expiration
} else {
a.RegionID = os.Getenv("ALICLOUD_REGION")
a.AccessKeyID = os.Getenv("ALICLOUD_ACCESS_KEY")
a.AccessKeySecret = os.Getenv("ALICLOUD_SECRET_KEY")
a.RoleName = roleName
}
return nil
}
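// NewAliClient builds an ECS client from the given configuration; when an ECS RAM role is
// used it also starts a goroutine that keeps the STS token refreshed.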
func NewAliClient(cfg *AliCloudConfig) (*AliClient, error) {
var err error
var ecsClient *ecs.Client
if cfg.RoleName == "" {
ecsClient, err = ecs.NewClientWithAccessKey(
cfg.RegionID,
cfg.AccessKeyID,
cfg.AccessKeySecret,
)
} else {
ecsClient, err = ecs.NewClientWithStsToken(
cfg.RegionID,
cfg.AccessKeyID,
cfg.AccessKeySecret,
cfg.StsToken,
)
}
if err != nil {
return nil, fmt.Errorf("failed to create alicloud client: %v", err)
}
aliClient := &AliClient{
RegionID: cfg.RegionID,
EcsClient: *ecsClient,
}
if cfg.RoleName != "" {
aliClient.setNextExpire(cfg.ExpireTime)
go aliClient.refreshStsToken(cfg, 1*time.Second)
}
return aliClient, nil
}
func (p *AliClient) setNextExpire(expireTime time.Time) {
p.clientLock.Lock()
defer p.clientLock.Unlock()
p.nextExpire = expireTime
}
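// refreshStsToken periodically re-reads the cloud configuration and swaps in a new ECS
// client shortly before the current STS token expires.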
func (p *AliClient) refreshStsToken(cfg *AliCloudConfig, sleepTime time.Duration) {
for {
time.Sleep(sleepTime)
now := time.Now()
utcLocation, err := time.LoadLocation("")
if err != nil {
log.Logger.Errorf("Get utc time error %v", err)
continue
}
nowTime := now.In(utcLocation)
p.clientLock.RLock()
sleepTime = p.nextExpire.Sub(nowTime)
p.clientLock.RUnlock()
log.Logger.Infof("Distance expiration time %v", sleepTime)
if sleepTime < 10*time.Minute {
sleepTime = time.Second * 1
} else {
sleepTime = 9 * time.Minute
log.Logger.Info("Next fetch sts sleep interval : ", sleepTime.String())
continue
}
err = cfg.GetCloudConfig()
if err != nil {
log.Logger.Errorf("Failed to refreshStsToken: %v", err)
continue
}
var ecsClient *ecs.Client
if cfg.RoleName == "" {
ecsClient, err = ecs.NewClientWithAccessKey(
cfg.RegionID,
cfg.AccessKeyID,
cfg.AccessKeySecret,
)
} else {
ecsClient, err = ecs.NewClientWithStsToken(
cfg.RegionID,
cfg.AccessKeyID,
cfg.AccessKeySecret,
cfg.StsToken,
)
}
if err != nil {
log.Logger.Errorf("Failed to create alicloud client: %v", err)
continue
}
log.Logger.Infof("Refresh client from sts token, next expire time %v", cfg.ExpireTime)
p.clientLock.Lock()
p.RegionID = cfg.RegionID
p.EcsClient = *ecsClient
p.nextExpire = cfg.ExpireTime
p.clientLock.Unlock()
}
}
|
[
"\"ALICLOUD_REGION\"",
"\"ALICLOUD_ACCESS_KEY\"",
"\"ALICLOUD_SECRET_KEY\"",
"\"ALICLOUD_REGION\"",
"\"ALICLOUD_ACCESS_KEY\"",
"\"ALICLOUD_SECRET_KEY\""
] |
[] |
[
"ALICLOUD_REGION",
"ALICLOUD_ACCESS_KEY",
"ALICLOUD_SECRET_KEY"
] |
[]
|
["ALICLOUD_REGION", "ALICLOUD_ACCESS_KEY", "ALICLOUD_SECRET_KEY"]
|
go
| 3 | 0 | |
IdRecDemo/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "IdRecDemo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
backend/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plain_shadow_31593.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/cmd/cmds_test.go
|
// Copyright © 2019 The Homeport Team
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd_test
import (
"fmt"
"io/ioutil"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/homeport/dyff/internal/cmd"
"github.com/gonvenience/term"
)
var _ = Describe("command line tool flags", func() {
BeforeEach(func() {
term.FixedTerminalWidth = 250
term.FixedTerminalHeight = 40
})
AfterEach(func() {
term.FixedTerminalWidth = -1
term.FixedTerminalHeight = -1
})
Context("version command", func() {
It("should print the version", func() {
out, err := dyff("version")
Expect(err).ToNot(HaveOccurred())
Expect(out).To(ContainSubstring("version (development)"))
})
})
Context("yaml command", func() {
Context("creating yaml output", func() {
It("should not create YAML output that is not valid", func() {
filename := createTestFile(`{"foo":{"bar":"*"}}`)
defer os.Remove(filename)
out, err := dyff("yaml", filename)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`---
foo:
bar: "*"
`))
})
})
Context("using restructure", func() {
Context("to write the file to STDOUT", func() {
It("should write a YAML file to STDOUT using restructure feature", func() {
filename := createTestFile(`---
list:
- aaa: bbb
name: one
`)
defer os.Remove(filename)
out, err := dyff("yaml", "--restructure", filename)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`---
list:
- name: one
aaa: bbb
`))
})
It("should write a YAML file with multiple documents to STDOUT using restructure feature", func() {
out, err := dyff("yaml", "--plain", "--restructure", assets("issues", "issue-133", "input.yml"))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`---
name: one
bar: foo
foo: bar
---
name: two
Foo: Bar
Bar: Foo
---
name: three
foobar: foobar
`))
})
})
Context("to write the file in-place", func() {
It("should write a YAML file in place using restructure feature", func() {
filename := createTestFile(`---
list:
- aaa: bbb
name: one
`)
defer os.Remove(filename)
out, err := dyff("yaml", "--restructure", "--in-place", filename)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEmpty())
data, err := ioutil.ReadFile(filename)
Expect(err).To(BeNil())
Expect(string(data)).To(BeEquivalentTo(`---
list:
- name: one
aaa: bbb
`))
})
})
Context("incorrect usage", func() {
It("should fail to write a YAML when in place and STDIN are used at the same time", func() {
_, err := dyff("yaml", "--in-place", "-")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(BeEquivalentTo("incompatible flags: cannot use in-place flag in combination with input from stdin"))
})
})
})
})
Context("json command", func() {
It("should write a JSON file in place using restructure feature", func() {
filename := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(filename)
out, err := dyff("json", "--restructure", "--in-place", filename)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEmpty())
data, err := ioutil.ReadFile(filename)
Expect(err).To(BeNil())
Expect(string(data)).To(BeEquivalentTo(`{"list": [{"name": "one", "aaa": "bbb"}]}
`))
})
It("should write a plain JSON file to STDOUT using restructure feature", func() {
filename := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(filename)
out, err := dyff("json", "--restructure", "--plain", filename)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`{"list": [{"name": "one", "aaa": "bbb"}]}
`))
})
It("should write a JSON file to STDOUT using restructure feature", func() {
filename := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(filename)
out, err := dyff("json", "--restructure", filename)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`{
"list": [
{
"name": "one",
"aaa": "bbb"
}
]
}
`))
})
It("should fail to write a JSON when in place and STDIN are used at the same time", func() {
_, err := dyff("json", "--in-place", "-")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(BeEquivalentTo("incompatible flags: cannot use in-place flag in combination with input from STDIN"))
})
It("should write timestamps with proper quotes in plain mode", func() {
out, err := dyff("json", "--plain", assets("issues", "issue-120", "buildpack.toml"))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(Equal(`{"metadata": {"dependencies": [{"deprecation_date": "2021-08-21T00:00:00Z"}], "dependency_deprecation_dates": [{"date": "2021-08-21T13:37:00Z"}]}}
`))
})
It("should write timestamps with proper quotes in default mode", func() {
out, err := dyff("json", assets("issues", "issue-120", "buildpack.toml"))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(Equal(`{
"metadata": {
"dependencies": [
{
"deprecation_date": "2021-08-21T00:00:00Z"
}
],
"dependency_deprecation_dates": [
{
"date": "2021-08-21T13:37:00Z"
}
]
}
}
`))
})
})
Context("between command", func() {
It("should create the default report when there are no flags specified", func() {
from := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(from)
to := createTestFile(`{"list":[{"aaa":"bbb","name":"two"}]}`)
defer os.Remove(to)
out, err := dyff("between", from, to)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(fmt.Sprintf(` _ __ __
_| |_ _ / _|/ _| between %s
/ _' | | | | |_| |_ and %s
| (_| | |_| | _| _|
\__,_|\__, |_| |_| returned one difference
|___/
list
- one list entry removed: + one list entry added:
- name: one - name: two
aaa: bbb aaa: bbb
`, from, to)))
})
It("should create the same default report when swap flag is used", func() {
from := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(from)
to := createTestFile(`{"list":[{"aaa":"bbb","name":"two"}]}`)
defer os.Remove(to)
out, err := dyff("between", "--swap", to, from)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(fmt.Sprintf(` _ __ __
_| |_ _ / _|/ _| between %s
/ _' | | | | |_| |_ and %s
| (_| | |_| | _| _|
\__,_|\__, |_| |_| returned one difference
|___/
list
- one list entry removed: + one list entry added:
- name: one - name: two
aaa: bbb aaa: bbb
`, from, to)))
})
It("should create the oneline report", func() {
from := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(from)
to := createTestFile(`{"list":[{"aaa":"bbb","name":"two"}]}`)
defer os.Remove(to)
out, err := dyff("between", "--output=brief", from, to)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(fmt.Sprintf("one change detected between %s and %s\n\n", from, to)))
})
It("should create a report using a custom root in the files", func() {
from, to := assets("examples", "from.yml"), assets("examples", "to.yml")
expected := fmt.Sprintf(` _ __ __
_| |_ _ / _|/ _| between %s, YAML root was changed to yaml.map
/ _' | | | | |_| |_ and %s, YAML root was changed to yaml.map
| (_| | |_| | _| _|
\__,_|\__, |_| |_| returned four differences
|___/
(root level)
- six map entries removed: + six map entries added:
stringB: fOObAr stringY: YAML!
intB: 10 intY: 147
floatB: 2.71 floatY: 24.0
boolB: false boolY: true
mapB: mapY:
key0: B key0: Y
key1: B key1: Y
listB: listY:
- B - Yo
- B - Yo
- B - Yo
type-change-1
± type change from string to int
- string
+ 147
type-change-2
± type change from string to int
- 12
+ 12
whitespaces
± whitespace only change
- Strings·can··have·whitespaces. + Strings·can··have·whitespaces.↵
↵
↵
`, from, to)
out, err := dyff("between", from, to, "--chroot", "yaml.map")
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(expected))
})
It("should fail when change root is used with files containing multiple documents", func() {
from, to := assets("testbed", "from.yml"), assets("testbed", "to.yml")
_, err := dyff("between", from, to, "--chroot", "orderchanges")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(BeEquivalentTo(fmt.Sprintf("failed to change root of %s to path orderchanges: change root for an input file is only possible if there is only one document, but %s contains two documents", from, from)))
})
It("should fail when change root is used with files that do not have the specified path", func() {
from, to := assets("examples", "from.yml"), assets("binary", "to.yml")
_, err := dyff("between", from, to, "--chroot", "yaml.map")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(BeEquivalentTo(fmt.Sprintf("failed to change root of %s to path yaml.map: no key 'map' found in map, available keys: data", to)))
})
It("should return an exit code with the number of differences if respective flag is used", func() {
from := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(from)
to := createTestFile(`{"list":[{"aaa":"bbb","name":"two"}]}`)
defer os.Remove(to)
out, err := dyff("between", "--output=brief", "--set-exit-code", from, to)
Expect(err).To(HaveOccurred())
Expect(out).To(BeEquivalentTo(fmt.Sprintf("one change detected between %s and %s\n\n", from, to)))
})
It("should fail when input files cannot be read", func() {
_, err := dyff("between", "/does/not/exist/from.yml", "/does/not/exist/to.yml")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("failed to load input files: unable to load data from /does/not/exist/from.yml"))
})
It("should fail when an unsupported output style is defined", func() {
_, err := dyff("between", "--output", "unknown", "/dev/null", "/dev/null")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("unknown output style unknown"))
})
It("should omit the dyff banner header if respective flag is set", func() {
from := createTestFile(`{"list":[{"aaa":"bbb","name":"one"}]}`)
defer os.Remove(from)
to := createTestFile(`{"list":[{"aaa":"bbb","name":"two"}]}`)
defer os.Remove(to)
out, err := dyff("between", "--omit-header", from, to)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`
list
- one list entry removed: + one list entry added:
- name: one - name: two
aaa: bbb aaa: bbb
`))
})
It("should ignore order changes if respective flag is set", func() {
from := createTestFile(`{"list":[{"name":"one"},{"name":"two"},{"name":"three"}]}`)
defer os.Remove(from)
to := createTestFile(`{"list":[{"name":"one"},{"name":"three"},{"name":"two"}]}`)
defer os.Remove(to)
out, err := dyff("between", "--omit-header", "--ignore-order-changes", from, to)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo("\n"))
})
It("should not panic when timestamps need to reported", func() {
out, err := dyff("between", "--omit-header", "../../assets/issues/issue-111/from.yml", "../../assets/issues/issue-111/to.yml")
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`
AWSTemplateFormatVersion
± type change from timestamp to string
- 2010-09-09
+ 2010-09-09
`))
})
It("should not try to evaluate variables in the user-provided strings", func() {
out, err := dyff("between", "--omit-header", assets("issues", "issue-132", "from.yml"), assets("issues", "issue-132", "to.yml"))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`
example_one
± value change
- %{one}
+ one
example_two
± value change
- two
+ %{two}
`))
})
It("should be understandingly when the arguments are in an incorrect order when executed by kubectl diff", func() {
from := createTestDirectory()
defer os.RemoveAll(from)
to := createTestDirectory()
defer os.RemoveAll(to)
createTestFileInDir(from, `{"list":[{"name":"one", "version":"v1"}]}`)
createTestFileInDir(to, `{"list":[{"name":"two", "version":"v2"}]}`)
_, err := dyff(from, to, "between", "--omit-header")
Expect(err).To(HaveOccurred())
// Usually, the environment variable would be like `dyff between`,
// but the binary name during testing is `cmd.test` and therefore
// the variable needs to be adjusted for the internal program logic to
// correctly accept this as the kubectl context it looks for.
var tmp = os.Getenv("KUBECTL_EXTERNAL_DIFF")
os.Setenv("KUBECTL_EXTERNAL_DIFF", "cmd.test between --omit-header")
defer os.Setenv("KUBECTL_EXTERNAL_DIFF", tmp)
_, err = dyff(from, to, "between", "--omit-header")
Expect(err).ToNot(HaveOccurred())
})
It("should create exit code zero if there are no changes", func() {
from := createTestFile(`{"foo": "bar"}`)
defer os.Remove(from)
to := createTestFile(`{"foo": "bar"}`)
defer os.Remove(to)
_, err := dyff("between", "--set-exit-code", from, to)
Expect(err).To(HaveOccurred())
exitCode, ok := err.(ExitCode)
Expect(ok).To(BeTrue())
Expect(exitCode.Value).To(Equal(0))
})
It("should create exit code one if there are changes", func() {
from := createTestFile(`{"foo": "bar"}`)
defer os.Remove(from)
to := createTestFile(`{"foo": "BAR"}`)
defer os.Remove(to)
_, err := dyff("between", "--set-exit-code", from, to)
Expect(err).To(HaveOccurred())
exitCode, ok := err.(ExitCode)
Expect(ok).To(BeTrue())
Expect(exitCode.Value).To(Equal(1))
})
It("should fail with an exit code other than zero or one in case of an error", func() {
_, err := dyff("between", "--set-exit-code", "from", "to")
Expect(err).To(HaveOccurred())
exitCode, ok := err.(ExitCode)
Expect(ok).To(BeTrue())
Expect(exitCode.Value).To(Equal(255))
})
It("should accept a list of paths and filter the report based on these", func() {
expected := `
yaml.map.type-change-1
± type change from string to int
- string
+ 147
yaml.map.whitespaces
± whitespace only change
- Strings·can··have·whitespaces. + Strings·can··have·whitespaces.↵
↵
↵
`
By("using GoPatch style path", func() {
out, err := dyff("between", "--omit-header", "--filter", "/yaml/map/whitespaces,/yaml/map/type-change-1", assets("examples", "from.yml"), assets("examples", "to.yml"))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(expected))
})
By("using DotStyle paths", func() {
out, err := dyff("between", "--omit-header", "--filter", "yaml.map.whitespaces,yaml.map.type-change-1", assets("examples", "from.yml"), assets("examples", "to.yml"))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(expected))
})
})
})
Context("last-applied command", func() {
It("should create the default report when there are no flags specified", func() {
kubeYAML := createTestFile(`---
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{ "metadata": { "annotations": {} }, "yaml": { "foo": "bat" } }
yaml:
foo: bar
`)
defer os.Remove(kubeYAML)
out, err := dyff("last-applied", "--omit-header", kubeYAML)
Expect(err).ToNot(HaveOccurred())
Expect(out).To(BeEquivalentTo(`
yaml.foo
± value change
- bat
+ bar
`))
})
It("should fail on an input file with multiple documents", func() {
kubeYAML := createTestFile(`---
foo: bar
---
foo: bar
`)
defer os.Remove(kubeYAML)
_, err := dyff("last-applied", kubeYAML)
Expect(err).To(HaveOccurred())
})
It("should fail on an input file when the last applied configuration is not set", func() {
kubeYAML := createTestFile(`foo: bar`)
defer os.Remove(kubeYAML)
_, err := dyff("last-applied", kubeYAML)
Expect(err).To(HaveOccurred())
})
})
})
|
[
"\"KUBECTL_EXTERNAL_DIFF\""
] |
[] |
[
"KUBECTL_EXTERNAL_DIFF"
] |
[]
|
["KUBECTL_EXTERNAL_DIFF"]
|
go
| 1 | 0 | |
gym_jsbsim/__init__.py
|
import os
try:
from .version import __version__, __jsbsim_version__ # noqa: F401
except ImportError:
pass
from jsbsim import __version__ as jsbsim_version
from gym.envs.registration import registry, register, make, spec
from gym_jsbsim.envs import TASKS
from gym_jsbsim.catalogs import Catalog
"""
This script registers JSBSimEnv
with OpenAI Gym so that its environments can be instantiated with a gym.make(id)
command.
To use do:
env = gym.make('GymJsbsim-{task}-v0')
"""
if __jsbsim_version__ != jsbsim_version:
print("Warning: You are using jsbsim-{} while gym-jsbsin was generated with {}".format(jsbsim_version, __jsbsim_version__))
if "JSBSIM_ROOT_DIR" not in os.environ:
os.environ["JSBSIM_ROOT_DIR"] = os.path.join(os.path.dirname(__file__), "jsbsim-" + __jsbsim_version__)
for task_name in TASKS:
register(
id=f"GymJsbsim-{task_name}-v0",
entry_point="gym_jsbsim.jsbsim_env:JSBSimEnv",
kwargs=dict(task=TASKS[task_name]),
)
|
[] |
[] |
[
"JSBSIM_ROOT_DIR"
] |
[]
|
["JSBSIM_ROOT_DIR"]
|
python
| 1 | 0 | |
python/pyspark/sql/context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
[] |
[] |
[
"SPARK_HOME"
] |
[]
|
["SPARK_HOME"]
|
python
| 1 | 0 | |
tests/lxc_test.py
|
import os
import mitogen
import mitogen.lxc
try:
any
except NameError:
from mitogen.core import any
import unittest2
import testlib
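# Return True if subseq occurs as a contiguous slice of seq.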
def has_subseq(seq, subseq):
return any(seq[x:x+len(subseq)] == subseq for x in range(0, len(seq)))
class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
lxc_attach_path = testlib.data_path('stubs/stub-lxc-attach.py')
def test_okay(self):
context = self.router.lxc(
container='container_name',
lxc_attach_path=self.lxc_attach_path,
)
argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
self.assertEquals(argv[0], self.lxc_attach_path)
self.assertTrue('--clear-env' in argv)
self.assertTrue(has_subseq(argv, ['--name', 'container_name']))
def test_eof(self):
e = self.assertRaises(mitogen.parent.EofError,
lambda: self.router.lxc(
container='container_name',
lxc_attach_path='true',
)
)
self.assertTrue(str(e).endswith(mitogen.lxc.Connection.eof_error_hint))
if __name__ == '__main__':
unittest2.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/scoop/example_scoop_gamma.py
|
import os
import socket
from scoop import futures
from pyroSAR.S1 import OSV
from pyroSAR.gamma import geocode
from pyroSAR.spatial import vector
from pyroSAR import Archive
from pyroSAR.ancillary import finder, multicore
"""
This script is an example usage for processing Sentinel-1 scenes with GAMMA
Run this script by calling the 'start_gamma.sh' script.
The following tasks are performed:
- a directory is scanned for valid Sentinel-1 scenes
- the found scenes are ingested into a spatialite database
- orbit state vector (OSV) files are downloaded to a user-defined directory (these are needed for precise orbit information)
- currently this is implemented to update a fixed directory in which all OSV files are stored
- an empty directory will first be filled with all available OSV files on the server
- a cluster job is setup using package 'scoop', which assigns a list of testsites to different cluster nodes
- for each site:
- query the SAR scenes, which overlap with your testsite and match certain criteria (e.g. sensor, acquisition mode etc.)
- filter the selected scenes by those that have already been processed and saved to the defined output directory
- do parallelized processing using package 'pathos'
"""
# the sites to be processed
# this is just an exemplary use case assuming a shapefile with different geometries for the test sites
sites = ['Egypt_Burullus', 'France_Camargue', 'Kenya_Lorian_Olbolossat', 'Sweden_Skogaryd', 'Sweden_Store-Mosse']
# the pyroSAR database file
dbfile = '/.../scenelist.db'
# the main directory for storing the processed results
maindir = '/.../swos_process'
# the directories for Sentinel-1 POE and RES orbit state vector files
# this is intended to be a fixed directory structure similar to that of ESA SNAP
# in the future all auxiliary data files will be stored in a structure defined by pyroSAR
# This is currently only used for Sentinel-1; within the processor two subdirectories will be created named
# POEORB and RESORB which will contain the respective orbit files
osvdir = '/.../.gamma/auxdata/Orbits/Sentinel-1'
def worker(sitename):
#######################################################################################
# setup general processing parameters
resolution = 20
# number of processes for Python pathos framework (multiple scenes in parallel)
parallel1 = 6
# number of parallel OpenMP threads; this is used by GAMMA internally
parallel2 = 6
os.environ['OMP_NUM_THREADS'] = str(parallel2)
#######################################################################################
# get the maximum date of the precise orbit files
# as type also 'RES' can be selected. These files are not as precise as POE and thus geocoding might not be
# quite as accurate
with OSV(osvdir) as osv:
maxdate = osv.maxdate(osvtype='POE', datetype='stop')
#######################################################################################
# define the directories for writing temporary and final results
sitedir = os.path.join(maindir, sitename)
tempdir = os.path.join(sitedir, 'proc_in')
outdir = os.path.join(sitedir, 'proc_out')
#######################################################################################
# load the test site geometry into a vector object
sites = vector.Vector('/.../testsites.shp')
# query the test site by name; a column name 'Site_Name' must be saved in your shapefile
site = sites['Site_Name={}'.format(sitename)]
#######################################################################################
# query the database for scenes to be processed
with Archive(dbfile) as archive:
selection_proc = archive.select(vectorobject=site,
processdir=outdir,
maxdate=maxdate,
sensor=('S1A', 'S1B'),
product='GRD',
acquisition_mode='IW',
vv=1)
print('{0}: {1} scenes found for site {2}'.format(socket.gethostname(), len(selection_proc), sitename))
#######################################################################################
# define the DEM file
demfile = '{0}/{1}/DEM/{1}_srtm_utm'.format(maindir, sitename)
if not os.path.isfile(demfile):
print('DEM missing for site {}'.format(sitename))
return
#######################################################################################
# call to processing utility
if len(selection_proc) > 1:
print('start processing')
if len(selection_proc) > 1:
if len(selection_proc) < parallel1:
parallel1 = len(selection_proc)
# run the function on multiple cores in parallel
multicore(geocode, cores=parallel1, multiargs={'scene': selection_proc}, dem=demfile,
tempdir=tempdir, outdir=outdir,
targetres=resolution, scaling='db',
func_geoback=2, func_interp=0, sarsimulation=False, osvdir=osvdir, cleanup=True, allow_RES_OSV=False)
elif len(selection_proc) == 1:
scene = selection_proc[0]
# run the function on a single core
geocode(scene, dem=demfile,
tempdir=tempdir, outdir=outdir,
targetres=resolution, scaling='db',
func_geoback=2, func_interp=0, sarSimCC=False, osvdir=osvdir, cleanup=True, allow_RES_OSV=False)
return len(selection_proc)
if __name__ == '__main__':
#######################################################################################
# update Sentinel-1 GRD scene archive database
# define a directory containing zipped scene archives and list all files starting with 'S1A' or 'S1B'
archive_s1 = '/.../sentinel1/GRD'
scenes_s1 = finder(archive_s1, ['^S1[AB]'], regex=True, recursive=False)
with Archive(dbfile) as archive:
archive.insert(scenes_s1)
#######################################################################################
# download the latest orbit state vector files
with OSV(osvdir) as osv:
osv.update()
#######################################################################################
# start the processing
results = list(futures.map(worker, sites))
|
[] |
[] |
[
"OMP_NUM_THREADS"
] |
[]
|
["OMP_NUM_THREADS"]
|
python
| 1 | 0 | |
main.py
|
from common_files.utils import MetricLogger, colorize, load_hyperparams
hyperparams = load_hyperparams()
AGENT = hyperparams['algo']['agent']
if 'cnn' in AGENT:
# For headless rendering
import os
os.environ['PYOPENGL_PLATFORM'] = 'egl'
import gym
if AGENT == 'td3':
from td3.agent import Agent
elif AGENT == 'sac':
from sac.agent import Agent
elif AGENT == 'sac-cnn':
from sac.agent import VisualAgent
# Use only environments with continuous action spaces before changes
env_name = hyperparams['env_name']
env = gym.make(env_name)
obs_init = env.reset()
num_epochs = hyperparams['num_epochs']
logger = MetricLogger(env_name, AGENT)
if 'cnn' in AGENT:
# obs_init_img = get_resized_img_from_env(env)
agent = VisualAgent(obs_size=(3,64,64), act_len=env.action_space.sample().shape[0], env_fn=lambda: gym.make(env_name),max_env_steps=1000, logger=logger)
else:
agent = Agent(obs_len=obs_init.shape[0], act_len=env.action_space.sample().shape[0], env_fn=lambda: gym.make(env_name),max_env_steps=1000, logger=logger)
print(colorize(f'Training {AGENT} agent on {env_name} Environment...', color='yellow'))
for epoch in range(num_epochs):
print(f'Epoch {epoch+1}/{num_epochs}:')
print('='*50)
agent.learn()
print('='*50)
# Save Q model
agent.save_Q_model()
env.close()
logger.save_logs()
logger.plot_logs()
# Things changed:
# 1. explicitly freezing target params
# 2. torch no grad in test runs
# 3. Change way of writing polyak avg
# 4. Clipping of noisy action
# 5. Changed implementation of Noise in actions
# TODO:
# 1. count number of variables in the network
# 2. improve logging
|
[] |
[] |
[
"PYOPENGL_PLATFORM"
] |
[]
|
["PYOPENGL_PLATFORM"]
|
python
| 1 | 0 | |
tests/compile/test_backend.py
|
import pytest
from myia.compile.backends import (
LoadingError,
UnknownBackend,
load_backend,
parse_default,
prim_groups as G,
)
from myia.operations import (
array_reduce,
array_to_scalar,
reshape,
scalar_add,
scalar_to_array,
)
from myia.testing.common import AN, MA
from myia.testing.multitest import mt, run, run_gpu
def test_default_backend():
import os
before = os.environ.get("MYIA_BACKEND", None)
try:
os.environ["MYIA_BACKEND"] = "pytorch"
assert parse_default() == ("pytorch", {})
os.environ["MYIA_BACKEND"] = "pytorch?target=cpu"
assert parse_default() == ("pytorch", {"target": "cpu"})
os.environ["MYIA_BACKEND"] = "relay?target=cpu&device_id=0"
assert parse_default() == ("relay", {"target": "cpu", "device_id": "0"})
finally:
# Make sure we don't switch the default for other tests.
if before is None:
del os.environ["MYIA_BACKEND"]
else:
os.environ["MYIA_BACKEND"] = before
def test_load_backend_unknown():
with pytest.raises(UnknownBackend):
load_backend("_fake_name_")
def test_backend_error():
from myia.compile.backends import _backends, register_backend
name = "__testing_name000_"
def format():
return {}
def f():
raise ValueError("test")
register_backend(name, f, format)
with pytest.raises(LoadingError):
load_backend(name)
del _backends[name]
@run(MA(2, 3))
def test_reshape2(x):
return reshape(x, (6,))
@mt(run(MA(2, 3)), run(MA(1, 3)))
def test_array_reduce(x):
return array_reduce(scalar_add, x, (1, 3))
@run(MA(2, 3))
def test_array_reduce2(x):
return array_reduce(scalar_add, x, (3,))
@run_gpu(MA(1, 1))
def test_array_to_scalar(x):
return array_to_scalar(reshape(x, ()))
@mt(run(2, 3), run(2.0, 3.0))
def test_truediv(x, y):
return x / y
@run_gpu(2)
def test_to_array(x):
return scalar_to_array(x, AN)
@mt(run(None), run(True), run(False))
def test_bool_and_nil_args(x):
return x
@run(3)
def test_return_tuple(x):
return (1, 2, x)
# Currently, this test should be skipped for all backends.
@run(0, primitives=[G.exception_operations])
def test_raise(x):
if x > 0:
return -x
else:
raise Exception("x <= 0")
@pytest.mark.xfail # MyiaTypeError: AbstractTuple vs AbstractTaggedUnion
@run(())
def test_return_list():
return [1, 2, 3]
ll = [1, 2, 3]
@pytest.mark.xfail # unhashable type in cse
@run(())
def test_constant_list():
return ll
a = MA(2, 3)
@pytest.mark.xfail # unhashable type in cse
@run(())
def test_constant_array():
return a
|
[] |
[] |
[
"MYIA_BACKEND"
] |
[]
|
["MYIA_BACKEND"]
|
python
| 1 | 0 | |
server/db.go
|
package main
import (
"database/sql"
"log"
"os"
_ "github.com/lib/pq"
)
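// ConnectToPG opens a connection to the objects database on the linked Postgres host
// (DB_PORT_5432_TCP_ADDR); the dbName argument is currently unused.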
func ConnectToPG(dbName string) *sql.DB {
db, err := sql.Open("postgres", "postgres://pacific:password@"+os.Getenv("DB_PORT_5432_TCP_ADDR")+"/objects?sslmode=disable")
if err != nil {
log.Fatal(err)
}
return db
}
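// SetupTable creates the fixed objects table; the tableName argument is currently unused.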
func SetupTable(db *sql.DB, tableName string) {
_, err := db.Exec("CREATE TABLE objects (name text PRIMARY KEY, size integer)")
if err != nil {
log.Printf("Error inserting into DB: %+v", err)
return
}
}
|
[
"\"DB_PORT_5432_TCP_ADDR\""
] |
[] |
[
"DB_PORT_5432_TCP_ADDR"
] |
[]
|
["DB_PORT_5432_TCP_ADDR"]
|
go
| 1 | 0 | |
contrib/mongodb/mongo-go-driver/vendor/github.com/mongodb/mongo-go-driver/mongo/retryable_writes_test.go
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mongo
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path"
"testing"
"strings"
"time"
"sync"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/core/connection"
"github.com/mongodb/mongo-go-driver/core/connstring"
"github.com/mongodb/mongo-go-driver/core/event"
"github.com/mongodb/mongo-go-driver/core/readpref"
"github.com/mongodb/mongo-go-driver/core/session"
"github.com/mongodb/mongo-go-driver/core/topology"
"github.com/mongodb/mongo-go-driver/core/writeconcern"
"github.com/mongodb/mongo-go-driver/internal/testutil"
"github.com/mongodb/mongo-go-driver/internal/testutil/helpers"
"github.com/mongodb/mongo-go-driver/mongo/collectionopt"
"github.com/stretchr/testify/require"
)
const retryWritesDir = "../data/retryable-writes"
type retryTestFile struct {
Data json.RawMessage `json:"data"`
MinServerVersion string `json:"minServerVersion"`
MaxServerVersion string `json:"maxServerVersion"`
Tests []*retryTestCase `json:"tests"`
}
type retryTestCase struct {
Description string `json:"description"`
FailPoint *failPoint `json:"failPoint"`
ClientOptions map[string]interface{} `json:"clientOptions"`
Operation *retryOperation `json:"operation"`
Outcome *retryOutcome `json:"outcome"`
}
type retryOperation struct {
Name string `json:"name"`
Arguments map[string]interface{} `json:"arguments"`
}
type retryOutcome struct {
Error bool `json:"error"`
Result json.RawMessage `json:"result"`
Collection struct {
Name string `json:"name"`
Data json.RawMessage `json:"data"`
} `json:"collection"`
}
var retryMonitoredTopology *topology.Topology
var retryMonitoredTopologyOnce sync.Once
var retryStartedChan = make(chan *event.CommandStartedEvent, 100)
var retryMonitor = &event.CommandMonitor{
Started: func(ctx context.Context, cse *event.CommandStartedEvent) {
retryStartedChan <- cse
},
}
func TestTxnNumberIncluded(t *testing.T) {
client := createRetryMonitoredClient(t, retryMonitor)
client.retryWrites = true
db := client.Database("retry-writes")
version, err := getServerVersion(db)
require.NoError(t, err)
if shouldSkipRetryTest(t, version) {
t.Skip()
}
doc1 := map[string]interface{}{"x": 1}
doc2 := map[string]interface{}{"y": 2}
update := map[string]interface{}{"$inc": 1}
var cases = []struct {
op *retryOperation
includesTxn bool
}{
{&retryOperation{Name: "deleteOne"}, true},
{&retryOperation{Name: "deleteMany"}, false},
{&retryOperation{Name: "updateOne", Arguments: map[string]interface{}{"update": update}}, true},
{&retryOperation{Name: "updateMany", Arguments: map[string]interface{}{"update": update}}, false},
{&retryOperation{Name: "replaceOne"}, true},
{&retryOperation{Name: "insertOne", Arguments: map[string]interface{}{"document": doc1}}, true},
{&retryOperation{Name: "insertMany", Arguments: map[string]interface{}{
"ordered": true, "documents": []interface{}{doc1, doc2}}}, true},
{&retryOperation{Name: "insertMany", Arguments: map[string]interface{}{
"ordered": false, "documents": []interface{}{doc1, doc2}}}, true},
{&retryOperation{Name: "findOneAndReplace"}, true},
{&retryOperation{Name: "findOneAndUpdate", Arguments: map[string]interface{}{"update": update}}, true},
{&retryOperation{Name: "findOneAndDelete"}, true},
}
err = db.Drop(ctx)
require.NoError(t, err)
for _, tc := range cases {
t.Run(tc.op.Name, func(t *testing.T) {
coll := db.Collection(tc.op.Name)
err = coll.Drop(ctx)
require.NoError(t, err)
// insert sample data
_, err = coll.InsertOne(ctx, doc1)
require.NoError(t, err)
_, err = coll.InsertOne(ctx, doc2)
require.NoError(t, err)
for len(retryStartedChan) > 0 {
<-retryStartedChan
}
executeRetryOperation(t, tc.op, nil, coll)
var evt *event.CommandStartedEvent
select {
case evt = <-retryStartedChan:
default:
require.Fail(t, "Expected command started event")
}
if tc.includesTxn {
require.NotNil(t, evt.Command.Lookup("txnNumber"))
} else {
require.Nil(t, evt.Command.Lookup("txnNumber"))
}
})
}
}
// test case for all RetryableWritesSpec tests
func TestRetryableWritesSpec(t *testing.T) {
for _, file := range testhelpers.FindJSONFilesInDir(t, retryWritesDir) {
runRetryTestFile(t, path.Join(retryWritesDir, file))
}
}
func runRetryTestFile(t *testing.T, filepath string) {
if strings.Contains(filepath, "bulk") {
return
}
content, err := ioutil.ReadFile(filepath)
require.NoError(t, err)
var testfile retryTestFile
require.NoError(t, json.Unmarshal(content, &testfile))
dbName := "admin"
dbAdmin := createTestDatabase(t, &dbName)
version, err := getServerVersion(dbAdmin)
require.NoError(t, err)
// check if we should skip all retry tests
if shouldSkipRetryTest(t, version) || os.Getenv("TOPOLOGY") == "sharded_cluster" {
t.Skip()
}
// check if we should skip individual test file
if shouldSkip(t, testfile.MinServerVersion, testfile.MaxServerVersion, dbAdmin) {
return
}
for _, test := range testfile.Tests {
runRetryTestCase(t, test, testfile.Data, dbAdmin)
}
}
func runRetryTestCase(t *testing.T, test *retryTestCase, data json.RawMessage, dbAdmin *Database) {
t.Run(test.Description, func(t *testing.T) {
client := createTestClient(t)
db := client.Database("retry-writes")
collName := sanitizeCollectionName("retry-writes", test.Description)
err := db.Drop(ctx)
require.NoError(t, err)
// insert data if present
coll := db.Collection(collName)
docsToInsert := docSliceToInterfaceSlice(docSliceFromRaw(t, data))
if len(docsToInsert) > 0 {
coll2, err := coll.Clone(collectionopt.WriteConcern(writeconcern.New(writeconcern.WMajority())))
require.NoError(t, err)
_, err = coll2.InsertMany(ctx, docsToInsert)
require.NoError(t, err)
}
// configure failpoint if needed
if test.FailPoint != nil {
doc := createFailPointDoc(t, test.FailPoint)
_, err := dbAdmin.RunCommand(ctx, doc)
require.NoError(t, err)
defer func() {
// disable failpoint if specified
_, _ = dbAdmin.RunCommand(ctx, bson.NewDocument(
bson.EC.String("configureFailPoint", test.FailPoint.ConfigureFailPoint),
bson.EC.String("mode", "off"),
))
}()
}
addClientOptions(client, test.ClientOptions)
executeRetryOperation(t, test.Operation, test.Outcome, coll)
verifyCollectionContents(t, coll, test.Outcome.Collection.Data)
})
}
func executeRetryOperation(t *testing.T, op *retryOperation, outcome *retryOutcome, coll *Collection) {
switch op.Name {
case "deleteOne":
res, err := executeDeleteOne(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, err)
} else {
require.NoError(t, err)
verifyDeleteResult(t, res, outcome.Result)
}
case "deleteMany":
_, _ = executeDeleteMany(nil, coll, op.Arguments)
// no checking required for deleteMany
case "updateOne":
res, err := executeUpdateOne(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, err)
} else {
require.NoError(t, err)
verifyUpdateResult(t, res, outcome.Result)
}
case "updateMany":
_, _ = executeUpdateMany(nil, coll, op.Arguments)
// no checking required for updateMany
case "replaceOne":
res, err := executeReplaceOne(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, err)
} else {
require.NoError(t, err)
verifyUpdateResult(t, res, outcome.Result)
}
case "insertOne":
res, err := executeInsertOne(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, err)
} else {
require.NoError(t, err)
verifyInsertOneResult(t, res, outcome.Result)
}
case "insertMany":
res, err := executeInsertMany(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, err)
} else {
require.NoError(t, err)
verifyInsertManyResult(t, res, outcome.Result)
}
case "findOneAndUpdate":
res := executeFindOneAndUpdate(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, res.err)
} else {
require.NoError(t, res.err)
verifyDocumentResult(t, res, outcome.Result)
}
case "findOneAndDelete":
res := executeFindOneAndDelete(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, res.err)
} else {
require.NoError(t, res.err)
verifyDocumentResult(t, res, outcome.Result)
}
case "findOneAndReplace":
res := executeFindOneAndReplace(nil, coll, op.Arguments)
if outcome == nil {
return
}
if outcome.Error {
require.Error(t, res.err)
} else {
require.NoError(t, res.err)
verifyDocumentResult(t, res, outcome.Result)
}
case "bulkWrite":
// TODO reenable when bulk writes implemented
t.Skip("Skipping until bulk writes implemented")
}
}
func createRetryMonitoredClient(t *testing.T, monitor *event.CommandMonitor) *Client {
clock := &session.ClusterClock{}
c := &Client{
topology: createRetryMonitoredTopology(t, clock, monitor),
connString: testutil.ConnString(t),
readPreference: readpref.Primary(),
clock: clock,
registry: defaultRegistry,
}
subscription, err := c.topology.Subscribe()
testhelpers.RequireNil(t, err, "error subscribing to topology: %s", err)
c.topology.SessionPool = session.NewPool(subscription.C)
return c
}
func createRetryMonitoredTopology(t *testing.T, clock *session.ClusterClock, monitor *event.CommandMonitor) *topology.Topology {
cs := testutil.ConnString(t)
cs.HeartbeatInterval = time.Minute
cs.HeartbeatIntervalSet = true
opts := []topology.Option{
topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return cs }),
topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
return append(
opts,
topology.WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
return append(
opts,
connection.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
return monitor
}),
)
}),
topology.WithClock(func(c *session.ClusterClock) *session.ClusterClock {
return clock
}),
)
}),
}
retryMonitoredTopologyOnce.Do(func() {
retryMonitoredTopo, err := topology.New(opts...)
if err != nil {
t.Fatal(err)
}
err = retryMonitoredTopo.Connect(ctx)
if err != nil {
t.Fatal(err)
}
retryMonitoredTopology = retryMonitoredTopo
})
return retryMonitoredTopology
}
// skip entire test suite if server version less than 3.6 OR not a replica set
func shouldSkipRetryTest(t *testing.T, serverVersion string) bool {
return compareVersions(t, serverVersion, "3.6") < 0 ||
os.Getenv("TOPOLOGY") == "server"
}
|
[
"\"TOPOLOGY\"",
"\"TOPOLOGY\""
] |
[] |
[
"TOPOLOGY"
] |
[]
|
["TOPOLOGY"]
|
go
| 1 | 0 | |
src/net/sourceforge/docfetcher/util/Util.java
|
/*******************************************************************************
* Copyright (c) 2010, 2011 Tran Nam Quang.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Tran Nam Quang - initial API and implementation
*******************************************************************************/
package net.sourceforge.docfetcher.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileFilter;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import net.sourceforge.docfetcher.util.annotations.MutableCopy;
import net.sourceforge.docfetcher.util.annotations.NotNull;
import net.sourceforge.docfetcher.util.annotations.Nullable;
import net.sourceforge.docfetcher.util.annotations.ThreadSafe;
import net.sourceforge.docfetcher.util.gui.Col;
import org.apache.commons.codec.binary.Base64;
import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.StyledText;
import org.eclipse.swt.dnd.Clipboard;
import org.eclipse.swt.dnd.FileTransfer;
import org.eclipse.swt.dnd.TextTransfer;
import org.eclipse.swt.dnd.Transfer;
import org.eclipse.swt.dnd.TransferData;
import org.eclipse.swt.events.DisposeEvent;
import org.eclipse.swt.events.DisposeListener;
import org.eclipse.swt.events.FocusEvent;
import org.eclipse.swt.events.FocusListener;
import org.eclipse.swt.events.KeyAdapter;
import org.eclipse.swt.events.KeyListener;
import org.eclipse.swt.events.MouseAdapter;
import org.eclipse.swt.events.MouseEvent;
import org.eclipse.swt.events.MouseTrackAdapter;
import org.eclipse.swt.events.MouseTrackListener;
import org.eclipse.swt.events.SelectionListener;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.Image;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.graphics.Rectangle;
import org.eclipse.swt.graphics.Resource;
import org.eclipse.swt.layout.FillLayout;
import org.eclipse.swt.layout.FormLayout;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.program.Program;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Combo;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.swt.widgets.Text;
import org.eclipse.swt.widgets.Widget;
import com.google.common.base.CharMatcher;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.sun.jna.Library;
import com.sun.jna.Native;
import com.sun.jna.WString;
/**
* @author Tran Nam Quang
*/
public final class Util {
/*
* TODO pre-release: consider structuring the methods in this class by putting them into
* public static inner classes.
*/
/** Whether the platform is Windows. */
public static final boolean IS_WINDOWS;
/** Whether the platform is Linux. */
public static final boolean IS_LINUX;
/** Whether the platform is Linux KDE. */
public static final boolean IS_LINUX_KDE;
/** Whether the operating system is Ubuntu and has the Unity desktop. */
public static final boolean IS_UBUNTU_UNITY;
/** Whether the platform is Mac OS X. */
public static final boolean IS_MAC_OS_X;
public static final boolean IS_64_BIT_JVM;
/** The system's temporary directory. Does not contain backward slashes. */
public static final File TEMP_DIR = new File(System.getProperty("java.io.tmpdir"));
/** The current directory. Does not contain backward slashes. */
public static final String USER_DIR_PATH = toForwardSlashes(System.getProperty("user.dir"));
/** The current directory. */
public static final File USER_DIR = new File(USER_DIR_PATH);
/** The user's home directory. Does not contain backward slashes. */
public static final String USER_HOME_PATH = System.getProperty("user.home");
/** Line separator character ('\r\n' on Windows, '\n' on Linux). */
public static final String LS = System.getProperty("line.separator");
/**
* File separator character. On Windows, this is '\', and on Linux, it's
* '/'.
*/
public static final String FS = System.getProperty("file.separator");
/**
* Default minimum value for the width of a button.
*/
public static final int BTW = 75;
static {
String osName = System.getProperty("os.name").toLowerCase();
IS_WINDOWS = osName.contains("windows");
IS_LINUX = osName.contains("linux");
IS_UBUNTU_UNITY = isUbuntuUnity(IS_LINUX);
IS_LINUX_KDE = IS_LINUX && System.getenv("KDE_FULL_SESSION") != null;
IS_MAC_OS_X = osName.equals("mac os x");
String arch = System.getProperty("sun.arch.data.model");
if (arch == null)
arch = System.getProperty("os.arch").toLowerCase();
IS_64_BIT_JVM = arch.contains("64");
}
private Util() {}
private static boolean isUbuntuUnity(boolean isLinux) {
if (!isLinux)
return false;
try {
String output = getProcessOutput("lsb_release -irs").trim();
String[] lines = output.split("\n");
if (lines.length != 2)
return false;
if (!lines[0].trim().toLowerCase().equals("ubuntu"))
return false;
// See: http://askubuntu.com/questions/70296/is-there-an-environment-variable-that-is-set-for-unity
if (lines[1].trim().equals("11.04"))
return "gnome".equals(System.getenv("DESKTOP_SESSION"))
&& "gnome".equals(System.getenv("GDMSESSION"));
return "Unity".equals(System.getenv("XDG_CURRENT_DESKTOP"));
}
catch (IOException e) {
return false;
}
}
@NotNull
private static String getProcessOutput(@NotNull String command)
throws IOException {
Process p = Runtime.getRuntime().exec(command);
BufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream()));
StringBuilder sb = new StringBuilder();
boolean firstLine = true;
while (true) {
String line = in.readLine();
if (line == null)
break;
if (firstLine)
firstLine = false;
else
sb.append(Util.LS);
sb.append(line);
}
return sb.toString();
}
/**
* Splits the given string into an integer array. Any characters other than
* digits and the 'minus' are treated as separators.
* <p>
* If the string cannot be parsed, the given array of default values is
* returned. If the string contains numbers that are greater than
* {@code Integer.MAX_VALUE} or less than {@code Integer.MIN_VALUE}, those
* numbers will be clamped.
*/
public static int[] toIntArray(String str, int[] defaultValues) {
if (str.trim().equals(""))
return new int[0];
String[] rawValues = str.split("[^-\\d]+");
int[] array = new int[rawValues.length];
for (int i = 0; i < rawValues.length; i++) {
try {
array[i] = Integer.parseInt(rawValues[i]);
}
catch (NumberFormatException e) {
if (rawValues[i].matches("\\d{10,}"))
array[i] = Integer.MAX_VALUE;
else if (rawValues[i].matches("-\\d{10,}"))
array[i] = Integer.MIN_VALUE;
else
return defaultValues;
}
}
return array;
}
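	/*
	 * Illustrative usage sketch (added for clarification; not part of the
	 * original source): shows the separator handling, clamping, and fallback
	 * behavior described in the Javadoc above.
	 */
	@SuppressWarnings("unused")
	private static void toIntArrayExample() {
		int[] defaults = {1, 2, 3};
		// Any characters other than digits and '-' act as separators.
		int[] parsed = toIntArray("10, -20; 30", defaults); // {10, -20, 30}
		// Out-of-range numbers are clamped instead of rejected.
		int[] clamped = toIntArray("99999999999", defaults); // {Integer.MAX_VALUE}
		// Unparseable input falls back to the given defaults.
		int[] fallback = toIntArray("abc", defaults); // {1, 2, 3}
	}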
/**
* Returns the given integer string as an {@code int} value. Leading and
* trailing whitespaces are ignored. If the string cannot be parsed, the
* given default value is returned. If the string is a number, but greater
* than {@code Integer.MAX_VALUE} or less than {@code Integer.MIN_VALUE}, a
* clamped value is returned.
*/
public static int toInt(String value, int defaultValue) {
value = value.trim();
try {
return Integer.parseInt(value);
}
catch (NumberFormatException e) {
if (value.matches("\\d{10,}"))
return Integer.MAX_VALUE;
else if (value.matches("-\\d{10,}"))
return Integer.MIN_VALUE;
}
return defaultValue;
}
/**
* Encodes the given collection of strings into a single string, using the
* specified separator. The resulting string is a concatenation of the
* elements of the collection, which are separated by the given separator
* and where occurrences of the separator and backslashes are escaped
* appropriately.
*
	 * @see Util#decodeStrings(char, String)
*/
@NotNull
public static String encodeStrings( @NotNull String sep,
@NotNull Collection<String> parts) {
Util.checkNotNull(sep, parts);
if (parts.isEmpty())
return "";
StringBuilder sb = new StringBuilder();
boolean isFirst = true;
for (String part : parts) {
if (!isFirst)
sb.append(sep);
sb.append(part.replace("\\", "\\\\").replace(sep, "\\" + sep));
isFirst = false;
}
return sb.toString();
}
/**
* Decodes the given string into a list of strings, using the specified
* separator. This method basically splits the given string at those
* occurrences of the separator that aren't escaped with a backslash.
* <p>
* Special case: If the given string is an empty or a blank string, an empty
* list is returned.
*
	 * @see Util#encodeStrings(String, Collection)
*/
@MutableCopy
@NotNull
public static List<String> decodeStrings(char sep, @NotNull String str) {
Util.checkNotNull(str);
if (str.trim().isEmpty())
return new ArrayList<String>(0);
boolean precedingBackslash = false;
List<String> parts = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < str.length(); i++) {
char c = str.charAt(i);
if (c == sep && ! precedingBackslash) {
parts.add(sb.toString());
sb.delete(0, sb.length());
}
else if (c != '\\' || precedingBackslash)
sb.append(c);
if (c == '\\')
precedingBackslash = ! precedingBackslash;
else
precedingBackslash = false;
}
parts.add(sb.toString());
return parts;
}
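	/*
	 * Illustrative round-trip sketch (added for clarification; not part of the
	 * original source): encodeStrings escapes backslashes and separator
	 * occurrences so that decodeStrings can recover the original parts.
	 */
	@SuppressWarnings("unused")
	private static void encodeDecodeExample() {
		List<String> parts = new ArrayList<String>();
		parts.add("C:/data");
		parts.add("a;b");         // contains the separator
		parts.add("back\\slash"); // contains a backslash
		String encoded = encodeStrings(";", parts); // C:/data;a\;b;back\\slash
		List<String> decoded = decodeStrings(';', encoded);
		assert decoded.equals(parts);
	}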
@NotNull
public static String encodeBase64(@NotNull String input) {
String encodedBytes = Base64.encodeBase64URLSafeString(input.getBytes());
return new String(encodedBytes);
}
/**
* Shortens the given string if its length exceeds a fixed limit.
*/
@NotNull
public static String truncate(@NotNull String str) {
if (str.length() > 32)
return str.substring(0, 32) + "..."; //$NON-NLS-1$
return str;
}
/**
* Removes any leading whitespace from the input string and returns the
* resulting string.
*
* @throws IllegalArgumentException
* if the input string is null.
* @see String#trim()
*/
@NotNull
public static String trimLeft(@NotNull String input) {
if (input == null)
throw new IllegalArgumentException();
for (int i = 0; i < input.length(); i++) {
char c = input.charAt(i);
if (! Character.isWhitespace(c))
return input.substring(i);
}
return "";
}
/**
* Removes any trailing whitespace from the input string and returns the
* resulting string.
*
* @throws IllegalArgumentException
* if the input string is null.
* @see String#trim()
*/
@NotNull
public static String trimRight(@NotNull String input) {
final int len = input.length();
for (int i = len - 1; i >= 0; i--) {
char c = input.charAt(i);
if (! Character.isWhitespace(c))
return input.substring(0, i + 1);
}
return "";
}
public static <T> boolean equals(@NotNull Collection<T> col, @NotNull T[] a) {
Util.checkNotNull(col, a);
if (col.size() != a.length)
return false;
int i = 0;
for (T e1 : col) {
if (!e1.equals(a[i]))
return false;
i++;
}
return true;
}
public static String ensureLinuxLineSep(@NotNull String input) {
return input.replace("\r\n", "\n");
}
public static String ensureWindowsLineSep(@NotNull String input) {
// Two replace passes are needed to avoid converting "\r\n" to "\r\r\n".
return input.replace("\r\n", "\n").replace("\n", "\r\n");
}
/**
* Centers the given shell relative to its parent shell and sets the shell's
* width and height. If there is no parent shell, the given shell is
* centered relative to the screen.
*/
public static void setCenteredBounds( @NotNull Shell shell,
int width,
int height) {
shell.setSize(width, height);
Composite parent = shell.getParent();
Rectangle parentBounds = null;
if (parent == null || !parent.isVisible())
parentBounds = shell.getMonitor().getBounds();
else
parentBounds = parent.getBounds();
int shellPosX = (parentBounds.width - width) / 2;
int shellPosY = (parentBounds.height - height) / 2;
if (parent != null) {
shellPosX += parentBounds.x;
shellPosY += parentBounds.y;
}
shell.setLocation(shellPosX, shellPosY);
}
/**
* Packs the given shell and then centers it relative to its parent shell.
* If there is no parent shell, the given shell is centered relative to the
* screen.
*/
public static void setCenteredBounds(@NotNull Shell shell) {
shell.pack();
Point shellSize = shell.getSize();
Composite parent = shell.getParent();
Rectangle parentBounds = null;
if (parent == null || !parent.isVisible())
parentBounds = shell.getMonitor().getBounds();
else
parentBounds = parent.getBounds();
int shellPosX = (parentBounds.width - shellSize.x) / 2;
int shellPosY = (parentBounds.height - shellSize.y) / 2;
if (parent != null) {
shellPosX += parentBounds.x;
shellPosY += parentBounds.y;
}
shell.setLocation(shellPosX, shellPosY);
}
/**
* Packs the given shell and then centers it relative to the given control.
*/
public static void setCenteredBounds( @NotNull Shell shell,
@NotNull Control control) {
shell.pack();
Point shellSize = shell.getSize();
Composite parent = control.getParent();
Rectangle bounds = control.getBounds();
bounds = control.getDisplay().map(parent, null, bounds);
int x = bounds.x + (bounds.width - shellSize.x) / 2;
		int y = bounds.y + (bounds.height - shellSize.y) / 2;
shell.setLocation(x, y);
}
/**
* Centers the given shell relative to its parent shell and sets the shell's
* minimum width and height. The actual width and height may be greater to
* provide enough space for the shell's children. If the given shell has no
* parent shell, it is centered relative to the screen.
*/
public static void setCenteredMinBounds(@NotNull Shell shell,
int minWidth,
int minHeight) {
Point prefSize = shell.computeSize(SWT.DEFAULT, SWT.DEFAULT);
int width = Math.max(prefSize.x, minWidth);
int height = Math.max(prefSize.y, minHeight);
setCenteredBounds(shell, width, height);
}
@NotNull
public static Button[] maybeSwapButtons(@NotNull Button b1,
@NotNull Button b2) {
boolean leftAlign = b1.getDisplay().getDismissalAlignment() == SWT.LEFT;
return new Button[] { leftAlign ? b1 : b2, leftAlign ? b2 : b1 };
}
/**
* Returns whether the first bit mask contains the second bit mask.
* <p>
* Example: {@code contains(SWT.CTRL | SWT.ALT, SWT.CTRL) == true}
*/
public static boolean contains(int bit1, int bit2) {
return (bit1 & bit2) == bit2;
}
/**
* Creates and returns a {@link org.eclipse.swt.layout.FillLayout
* FillLayout} with the given margin.
*/
public static FillLayout createFillLayout(int margin) {
FillLayout layout = new FillLayout();
layout.marginWidth = layout.marginHeight = margin;
return layout;
}
/**
* Creates and returns a {@link org.eclipse.swt.layout.GridLayout
* GridLayout} with the given arguments.
*/
	public static GridLayout createGridLayout(int numColumns, boolean makeColumnsEqualWidth, int margin, int spacing) {
		GridLayout layout = new GridLayout(numColumns, makeColumnsEqualWidth);
layout.marginWidth = layout.marginHeight = margin;
layout.horizontalSpacing = layout.verticalSpacing = spacing;
return layout;
}
/**
* Creates and returns a {@link org.eclipse.swt.layout.FormLayout
* FormLayout} with the given margin.
*/
public static FormLayout createFormLayout(int margin) {
FormLayout layout = new FormLayout();
layout.marginWidth = layout.marginHeight = margin;
return layout;
}
@NotNull
public static Text createLabeledGridText( @NotNull Composite parent,
@NotNull String labelText) {
Label label = new Label(parent, SWT.NONE);
label.setText(labelText);
label.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, false, false));
Text text = new Text(parent, SWT.BORDER | SWT.SINGLE);
text.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false));
return text;
}
@NotNull
public static Text createUnlabeledGridText( @NotNull Composite parent) {
Text text = new Text(parent, SWT.BORDER | SWT.SINGLE);
text.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false));
return text;
}
@NotNull
public static StyledText createLabeledGridStyledText( @NotNull Composite parent,
@NotNull String labelText) {
Label label = new Label(parent, SWT.NONE);
label.setText(labelText);
label.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, false, false));
StyledText text = new StyledText(parent, SWT.BORDER | SWT.SINGLE);
text.setLayoutData(new GridData(SWT.FILL, SWT.CENTER, true, false));
return text;
}
@NotNull
public static Button createCheckButton( @NotNull Composite parent,
@NotNull String label) {
Button bt = new Button(parent, SWT.CHECK);
bt.setText(label);
return bt;
}
@NotNull
public static Button createPushButton( @NotNull Composite parent,
@NotNull String label,
@NotNull SelectionListener listener) {
Button bt = new Button(parent, SWT.PUSH);
bt.setText(label);
bt.addSelectionListener(listener);
return bt;
}
@NotNull
public static Button createPushButton( @NotNull Composite parent,
@Nullable Image image,
@Nullable String toolTip,
@NotNull SelectionListener listener) {
Button bt = new Button(parent, SWT.PUSH);
bt.setImage(image);
if (toolTip != null)
bt.setToolTipText(toolTip);
bt.addSelectionListener(listener);
return bt;
}
/**
* Returns a suitable text foreground color for the given background color.
* The returned color is either black or white, depending on the perceived
* luminance of the given background color.
*/
@NotNull
public static Color getTextForeground(@NotNull Color background) {
int r = background.getRed();
int g = background.getGreen();
int b = background.getBlue();
double a = 1 - (0.299 * r + 0.587 * g + 0.114 * b) / 255;
return a < 0.5 ? Col.BLACK.get() : Col.WHITE.get();
}
/**
* Splits the given file path at any path separators, i.e. forward or
* backward slashes. Example:
*
* <pre>
* /path/to/file/ -> '', 'path', 'to', 'file'
* </pre>
*
* Note that a leading path separator will produce an empty string at the
* beginning of the returned list, while a (single) trailing path separator
* won't.
*/
@MutableCopy
@NotNull
public static List<String> splitPath(@NotNull String path) {
List<String> parts = new ArrayList<String>();
int lastStart = 0;
for (int i = 0; i < path.length(); i++) {
char c = path.charAt(i);
if (c == '/' || c == '\\') {
parts.add(path.substring(lastStart, i));
lastStart = i + 1;
}
}
if (lastStart < path.length())
parts.add(path.substring(lastStart));
return parts;
}
/**
* A {@link com.google.common.base.CharMatcher CharMatcher} that matches
* forward and backward slashes. See the {@code CharMatcher} Javadocs for
* more.
*/
public static final CharMatcher fileSepMatcher = CharMatcher.anyOf("/\\").precomputed();
/**
* Creates a file path by joining the given parts. All leading and trailing
* forward and backward slashes are stripped from the parts, except for the
* first part, where only the trailing slashes are stripped. All backward
* slashes are replaced by forward slashes.
* <p>
* Special case: If only two path parts are given and one of them is empty,
* the other path part is returned, without any additional file separators.
*/
@NotNull
public static String joinPath( @NotNull String first,
@NotNull String second,
@NotNull String... more) {
if (more.length == 0) {
if (first.isEmpty())
return second;
if (second.isEmpty())
return first;
}
StringBuilder sb = new StringBuilder();
sb.append(fileSepMatcher.trimTrailingFrom(first));
sb.append('/');
sb.append(fileSepMatcher.trimFrom(second));
for (int i = 0; i < more.length; i++) {
sb.append('/');
sb.append(fileSepMatcher.trimFrom(more[i]));
}
return toForwardSlashes(sb.toString());
}
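	/*
	 * Illustrative usage sketch (added for clarification; not part of the
	 * original source): trailing and leading slashes are trimmed, backslashes
	 * are converted to forward slashes, and the two-part special case returns
	 * the non-empty part unchanged.
	 */
	@SuppressWarnings("unused")
	private static void joinPathExample() {
		String p1 = joinPath("C:\\data\\", "/sub/", "file.txt"); // "C:/data/sub/file.txt"
		String p2 = joinPath("", "relative/path"); // "relative/path"
	}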
/**
	 * Same as {@link #joinPath(String, String, String...)}, but reads the parts
	 * from an <tt>Iterable</tt>.
*/
@NotNull
public static String joinPath(@NotNull Iterable<?> parts) {
Iterator<?> it = parts.iterator();
if (! it.hasNext())
return "";
StringBuilder sb = new StringBuilder();
sb.append(fileSepMatcher.trimTrailingFrom(it.next().toString()));
while (it.hasNext()) {
sb.append('/');
sb.append(fileSepMatcher.trimFrom(it.next().toString()));
}
return toForwardSlashes(sb.toString());
}
@NotNull
public static String join(@NotNull String separator, @NotNull Object... parts) {
Util.checkNotNull(separator);
StringBuilder sb = new StringBuilder();
for (int i = 0; i < parts.length; i++) {
if (i == 0) {
sb.append(parts[i]);
} else {
sb.append(separator);
sb.append(parts[i]);
}
}
return sb.toString();
}
@NotNull
public static String join(@NotNull String separator, @NotNull Iterable<?> parts) {
Util.checkNotNull(separator);
Iterator<?> it = parts.iterator();
if (! it.hasNext())
return "";
StringBuilder sb = new StringBuilder();
sb.append(it.next().toString());
while (it.hasNext()) {
sb.append(separator);
sb.append(it.next().toString());
}
return sb.toString();
}
/**
* For the given file, returns an absolute path in which all backward
* slashes have been replaced by forward slashes.
* <p>
* Exception: If the file's path is a UNC path, the UNC path is returned as
* is.
*/
@NotNull
public static String getAbsPath(@NotNull File file) {
String absPath = file.getAbsolutePath();
if (absPath.startsWith("\\\\")) // UNC path?
return absPath;
/*
* We'll replace "//" with "/" here due to a bug in the
* File.getAbsolutePath method: On Windows, if the given file has the
* path "SOME_PATH" and the current working directory is the root of a
* device, e.g. "C:\", then getAbsolutePath will return "C:\\SOME_PATH"
* rather than the more sensible value "C:\SOME_PATH".
*/
return absPath.replace('\\', '/').replace("//", "/");
}
/**
* Returns whether given file's path is a UNC path.
*/
public static boolean isUncPath(@NotNull File file) {
return file.getPath().startsWith("\\\\");
}
/**
* For the given path string, returns an absolute path in which all backward
* slashes have been replaced by forward slashes.
*/
@NotNull
public static String getAbsPath(@NotNull String path) {
return getAbsPath(new File(path));
}
@NotNull
public static File getAbsFile(@NotNull File file) {
return new File(getAbsPath(file));
}
/**
* Equivalent to {@link java.io.File#getAbsolutePath()}.
*/
@NotNull
public static String getSystemAbsPath(@NotNull File file) {
return file.getAbsolutePath();
}
/**
* Equivalent to {@link java.io.File#getAbsolutePath() new
* java.io.File(path).getAbsolutePath()}.
*/
@NotNull
public static String getSystemAbsPath(@NotNull String path) {
return new File(path).getAbsolutePath();
}
@NotNull
public static File getCanonicalFile(@NotNull String path) {
return getCanonicalFile(new File(path));
}
@NotNull
public static File getCanonicalFile(@NotNull File file) {
return new File(getCanonicalPath(file));
}
@NotNull
public static String getCanonicalPath(@NotNull File file) {
if (IS_WINDOWS && isWindowsDevice(file.getPath())) {
String driveLetter = getDriveLetter(file.getPath());
assert driveLetter != null;
return driveLetter + ":\\"; // the trailing slash is important here
}
// Calling getCanonicalPath leads to performance problems for files
// located on a network, so it has been disabled. See:
// https://sourceforge.net/p/docfetcher/discussion/702424/thread/4ed68957/
// try {
// return file.getCanonicalPath();
// }
// catch (IOException e) {
// return file.getAbsolutePath();
// }
return file.getAbsolutePath();
}
public static boolean isCanonicallyEqual( @Nullable File file1,
@Nullable File file2) {
if (file1 == null || file2 == null)
return false;
return getCanonicalFile(file1).equals(getCanonicalFile(file2));
}
/**
* Returns all files and directories directly underneath the given
* directory. This works like {@link File#listFiles()}, except that when
* access to the directory is denied, an empty array is returned, not a null
* pointer.
*/
@NotNull
public static File[] listFiles(@NotNull File dir) {
File[] files = dir.listFiles();
return files == null ? new File[0] : files;
}
/**
* Returns all files and directories directly underneath the given directory
* that are not filtered by the given {@code filter}. This works like
* {@link File#listFiles(FilenameFilter)}, except that when access to the
* directory is denied, an empty array is returned, not a null pointer.
*/
@NotNull
public static File[] listFiles( @NotNull File dir,
@Nullable FilenameFilter filter) {
File[] files = dir.listFiles(filter);
return files == null ? new File[0] : files;
}
/**
* Returns all files and directories directly underneath the given directory
* that are not filtered by the given {@code filter}. This works like
* {@link File#listFiles(FileFilter)}, except that when access to the
* directory is denied, an empty array is returned, not a null pointer.
*/
@NotNull
public static File[] listFiles( @NotNull File dir,
@Nullable FileFilter filter) {
File[] files = dir.listFiles(filter);
return files == null ? new File[0] : files;
}
/**
* Returns whether the given file is a symlink. Returns false if the file
	 * doesn't exist or if an IOException occurred. The symlink detection is
* based on the comparison of the absolute and canonical path of a link: If
* those two differ, the given file can be assumed to be a symlink.
* <p>
* Note: If the given file is an instance of TFile and
* represents an archive entry, this method always returns false.
*/
public static boolean isSymLink(@NotNull File file) {
try {
/*
* Earlier versions simply compared the absolute and canonical path
* of the given file. This did not work for files with 8.3
* filenames, which were incorrectly identified as symlinks.
*/
File canon;
if (file.getParent() == null) {
canon = file;
}
else {
File canonDir = file.getParentFile().getCanonicalFile();
canon = new File(canonDir, file.getName());
}
return !canon.getCanonicalFile().equals(canon.getAbsoluteFile());
}
catch (IOException e) {
return false;
}
}
private interface Kernel32 extends Library {
public int GetFileAttributesW(WString fileName);
}
private static Kernel32 lib = null;
private static int getWin32FileAttributes(File file) throws IOException {
if (lib == null) {
synchronized (Kernel32.class) {
lib = (Kernel32) Native.loadLibrary("kernel32", Kernel32.class);
}
}
/*
* It's important here to use the canonical path, because: (1)
* Non-canonical paths can be made arbitrarily long using "..", like
* this: "C:/Test/../Test/../Test/../Test". (2) GetFileAttributesW will
* return -1 if the path is as long as or longer than 260 characters.
* This can cause directories and files to be incorrectly identified as
* NTFS junctions, for example.
*/
String path;
try {
path = file.getCanonicalPath();
}
catch (IOException e) {
path = file.getAbsolutePath();
}
return lib.GetFileAttributesW(new WString(path));
}
/**
* Returns whether the given file is a Windows junction or symlink. Returns
	 * false if the platform is not Windows, if the file doesn't exist or if an
	 * IOException occurred.
* <p>
* Notes:
* <ul>
* <li>If the given file is an instance of TFile and represents an archive
* entry, this method always returns false.</li>
* <li>This method requires file system access, which can cause latencies if
* the file is accessed over a network.</li>
* </ul>
*/
public static boolean isJunctionOrSymlink(@NotNull File file) {
if (! IS_WINDOWS)
return false;
try {
// Wrap in java.io.File to shield against TFile instances
return new File(file.getPath()).exists()
&& (0x400 & getWin32FileAttributes(file)) != 0;
}
catch (IOException e) {
return false;
}
}
/**
* Returns the parent of the given file. Unlike the standard method
* {@link File#getParentFile()}, this method will not return null if the
* given file was constructed with a relative path.
*/
@NotNull
public static File getParentFile(@NotNull File file) {
Util.checkNotNull(file);
File parent = file.getParentFile();
if (parent == null)
parent = file.getAbsoluteFile().getParentFile();
return parent;
}
@NotNull
public static String toForwardSlashes(@NotNull String path) {
if (path.startsWith("\\\\")) // UNC path?
return path;
return path.replace('\\', '/');
}
/**
* @see #getParentFile(File)
*/
@NotNull
public static File getParentFile(@NotNull String path) {
Util.checkNotNull(path);
File file = new File(path);
File parent = file.getParentFile();
if (parent == null)
parent = file.getAbsoluteFile().getParentFile();
return parent;
}
/**
* Returns true if <tt>objects</tt> contains an object that is equal to
* <tt>object</tt>. Returns false if <tt>objects</tt> is null.
*/
public static boolean containsEquality( @Nullable Object[] objects,
@Nullable Object object) {
if (objects == null)
return false;
for (Object candidate : objects)
if (candidate.equals(object))
return true;
return false;
}
/**
* Equivalent to {@link #splitFilename(String)
* splitFilename(file.getName())}.
*
* @see Util#splitFilename(String)
*/
public static String[] splitFilename(@NotNull File file) {
return Util.splitFilename(file.getName());
}
/**
* Splits the given filename into a base name and the file extension,
* omitting the '.' character, e.g. "data.xml" -> ["data", "xml"]. The file
* extension is an empty string if the file has no extension (i.e. it
* doesn't contain the '.' character). The returned file extension is always
* lowercase, even if it wasn't lowercased in the given filename. It is also
* guaranteed that the returned array is always of length 2.
* <p>
* Exception: If the file ends with ".xxx.gz", then the returned file
* extension is "xxx.gz", not "gz". Examples:
* <ul>
* <li>"archive.tar.gz" -> ["archive", "tar.gz"]
* <li>"abiword.abw.gz" -> ["abiword", "abw.gz"]
* </ul>
* <p>
* Note: This method also accepts filepaths.
*
* @throws NullPointerException
* if the given filename is null.
*/
@NotNull
public static String[] splitFilename(@NotNull String filename) {
int index = filename.lastIndexOf('.');
if (index == -1)
return new String[] {filename, ""};
String ext = filename.substring(index + 1).toLowerCase();
if (ext.equals("gz")) {
int index2 = filename.lastIndexOf('.', index - 1);
if (index2 != -1) {
return new String[] {
filename.substring(0, index2),
filename.substring(index2 + 1).toLowerCase()
};
}
}
return new String[] {filename.substring(0, index), ext};
}
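	/*
	 * Illustrative usage sketch (added for clarification; not part of the
	 * original source): demonstrates the ordinary case, the lowercasing of
	 * the extension, and the special ".gz" handling described above.
	 */
	@SuppressWarnings("unused")
	private static void splitFilenameExample() {
		String[] a = splitFilename("Data.XML");       // {"Data", "xml"}
		String[] b = splitFilename("archive.tar.gz"); // {"archive", "tar.gz"}
		String[] c = splitFilename("README");         // {"README", ""}
	}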
@NotNull
public static String getExtension(@NotNull String filename) {
return splitFilename(filename)[1];
}
@NotNull
public static String getExtension(@NotNull File file) {
return splitFilename(file.getName())[1];
}
/**
* For the given filename and a list of file extensions, this method returns
* true if any of the file extensions match the filename. A match occurs
	 * when the given filename, after being lower-cased, ends with '.' followed
	 * by the matching lower-cased file extension.
* <p>
* Example: The filename <code>'some_file.TXT'</code> matches the file
* extension <code>'txt'</code>.
* <p>
* Note: This method also accepts filepaths.
*/
public static boolean hasExtension( @NotNull String filename,
@NotNull String... extensions) {
filename = filename.toLowerCase();
for (String ext : extensions)
if (filename.endsWith("." + ext.toLowerCase()))
return true;
return false;
}
/**
* @see #hasExtension(String, String...)
*/
public static boolean hasExtension(@NotNull String filename,
@NotNull Collection<String> extensions) {
filename = filename.toLowerCase();
for (String ext : extensions)
if (filename.endsWith("." + ext.toLowerCase()))
return true;
return false;
}
/**
* Deletes all the files within a directory. Does not delete the directory
* itself.
* <p>
* If the file argument is a symbolic link or there is a symbolic link in
* the path leading to the directory, this method will do nothing. Symbolic
* links within the directory are not followed.
*/
public static void deleteContents(@NotNull File directory)
throws IOException {
checkThat(directory.isDirectory());
if (isSymLink(directory))
return;
for (File file : listFiles(directory))
deleteRecursively(file);
}
/**
* Deletes a file or directory and all contents recursively.
* <p>
* If the file argument is a symbolic link the link will be deleted but not
* the target of the link. If the argument is a directory, symbolic links
* within the directory will not be followed.
*/
public static void deleteRecursively(@NotNull File file) throws IOException {
if (file.isDirectory())
deleteContents(file);
if (!file.delete())
throw new IOException("Failed to delete " + file);
}
/**
* Returns the name of the given file. In contrast to the default
* {@link File#getName()} method, this method will return a drive letter
* instead of an empty string if the given file is a Windows root such as
* "C:". The {@code letterSuffix} argument is a string that will be appended
* to the drive letter, if one is returned.
*/
@NotNull
public static String getNameOrLetter( @NotNull File file,
@NotNull String letterSuffix) {
Util.checkNotNull(file, letterSuffix);
String filename = file.getName();
/*
* Special case: If the file was created as 'new File("")', then its
* filename will be an empty string.
*/
if (file.getAbsoluteFile().equals(USER_DIR))
return USER_DIR.getName();
/*
* Note: Do not use absolute files here, because this would turn "C:"
* into the working directory! (Strange but true.)
*/
if (IS_WINDOWS && filename.length() == 0 && getParentFile(file) == null) {
String driveLetter = getDriveLetter(file.getPath());
if (driveLetter != null)
return driveLetter + letterSuffix;
}
return filename;
}
private static Pattern drivePattern = Pattern.compile("([a-zA-Z]):.*");
private static Pattern driveOnlyPattern = Pattern.compile("(?:[a-zA-Z]):(?:\\\\|/)*");
/**
* Returns the drive letter at the beginning of the given Windows path, or
* null if the path doesn't start with a drive letter.
* <p>
* Example: For "C:\Windows" this method returns "C".
*/
@Nullable
public static String getDriveLetter(@NotNull String path) {
Util.checkNotNull(path);
Matcher m = Util.drivePattern.matcher(path);
if (m.matches())
return m.group(1).toUpperCase();
return null;
}
public static boolean isWindowsDevice(@NotNull String path) {
return driveOnlyPattern.matcher(path).matches();
}
public static void assertSwtThread() {
if (Display.getCurrent() == null)
throw new IllegalStateException();
}
/**
* Throws an <code>IllegalArgumentException</code> if the given condition is
* false.
*/
public static void checkThat(boolean condition) {
if (!condition)
throw new IllegalArgumentException();
}
/**
* Throws an <code>IllegalArgumentException</code> with the given error
* message if the given condition is false.
*/
public static void checkThat(boolean condition, @NotNull String message) {
if (!condition)
throw new IllegalArgumentException(message);
}
/**
* Throws an <code>IllegalArgumentException</code> if the provided argument
* is null. If not, the argument is returned.
*/
public static <T> T checkNotNull(T a) {
/*
* Generally, it does not make sense to check that a method argument of
* type Boolean is not null - if the Boolean is not allowed to be null,
* one could use a primitive boolean instead. If someone does call this
* method with a Boolean, he/she might have done so by accident by
* confusing checkNotNull with checkThat. To prevent this, we'll throw
* an exception.
*/
if (a instanceof Boolean)
throw new UnsupportedOperationException();
if (a == null)
throw new IllegalArgumentException();
return a;
}
/**
* Throws an <code>IllegalArgumentException</code> if any of the provided
* arguments is null.
*/
public static void checkNotNull(Object a, Object b) {
if (a == null || b == null)
throw new IllegalArgumentException();
}
/**
* Throws an <code>IllegalArgumentException</code> if any of the provided
* arguments is null.
*/
public static void checkNotNull(Object a, Object b, Object c) {
if (a == null || b == null || c == null)
throw new IllegalArgumentException();
}
/**
* Throws an <code>IllegalArgumentException</code> if any of the provided
* arguments is null.
*/
public static void checkNotNull(Object a, Object b, Object c, Object d) {
if (a == null || b == null || c == null || d == null)
throw new IllegalArgumentException();
}
/**
* Throws an <code>IllegalArgumentException</code> if any of the provided
* arguments is null.
*/
public static void checkNotNull(Object a, Object b, Object c, Object d, Object e) {
if (a == null || b == null || c == null || d == null || e == null)
throw new IllegalArgumentException();
}
/**
* Returns the given string if it is not null, otherwise returns an empty
* string.
*/
@NotNull
public static String notNull(@Nullable String string) {
return string == null ? "" : string;
}
private static long lastTimeStamp = -1;
/**
* Returns a unique identifier based on {@link System#currentTimeMillis()}.
* The returned ID is guaranteed to differ from all previous IDs obtained by
* this method.
*/
@ThreadSafe
public static synchronized long getTimestamp() {
/*
* Try to create a timestamp and don't return until the last timestamp
* and the current one are unequal.
*/
long newTimeStamp = System.currentTimeMillis();
while (newTimeStamp == lastTimeStamp)
newTimeStamp = System.currentTimeMillis();
lastTimeStamp = newTimeStamp;
return newTimeStamp;
}
/**
* Returns true if the directory given by <tt>dir</tt> is a direct or
* indirect parent directory of the file or directory given by
* <tt>fileOrDir</tt>.
*/
public static boolean contains( @NotNull File dir,
@NotNull File fileOrDir) {
return contains(getAbsPath(dir), getAbsPath(fileOrDir));
}
/**
* Returns true if the directory given by the absolute path <tt>dirPath</tt>
* is a direct or indirect parent directory of the file or directory given
* by the absolute path <tt>fileOrDirPath</tt>.
*/
public static boolean contains( @NotNull String dirPath,
@NotNull String fileOrDirPath) {
dirPath = dirPath.replace('\\', '/');
fileOrDirPath = fileOrDirPath.replace('\\', '/');
if (dirPath.length() >= fileOrDirPath.length())
return false;
char c = fileOrDirPath.charAt(dirPath.length());
if (c != '/')
return false;
if (! fileOrDirPath.startsWith(dirPath))
return false;
return true;
}
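	/*
	 * Illustrative usage sketch (added for clarification; not part of the
	 * original source): only genuine parent directories count, not mere
	 * string prefixes or the path itself.
	 */
	@SuppressWarnings("unused")
	private static void containsExample() {
		boolean b1 = contains("C:/data", "C:/data/sub/file.txt"); // true
		boolean b2 = contains("C:/data", "C:/database/file.txt"); // false (string prefix only)
		boolean b3 = contains("C:/data", "C:/data");              // false (same path)
	}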
/**
* Returns the last element of the given list. Returns null if the given
* list is empty or null.
*/
@Nullable
public static <T> T getLast(@Nullable List<T> list) {
if (list == null) return null;
int size = list.size();
if (size == 0) return null;
return list.get(size - 1);
}
@MutableCopy
@NotNull
public static <T> List<T> createList( int extraCapacity,
@NotNull T... elements) {
Util.checkNotNull(elements);
List<T> newList = new ArrayList<T>(elements.length + extraCapacity);
for (T element : elements)
newList.add(element);
return newList;
}
/**
* Creates a new list from the given collection and elements. The given
* collection is added first to the returned list.
*
* @see #createListReverse(Collection, Object...)
*/
@MutableCopy
@NotNull
public static <T> List<T> createList( @NotNull Collection<? extends T> col,
@NotNull T... elements) {
Util.checkNotNull(col, elements);
List<T> newList = new ArrayList<T>(col.size() + elements.length);
newList.addAll(col);
for (T element : elements)
newList.add(element);
return newList;
}
/**
* Creates a new list from the given collection and elements. The given
* elements are added first to the returned list.
*
* @see #createList(Collection, Object...)
*/
@MutableCopy
@NotNull
public static <T> List<T> createListReversed( @NotNull Collection<? extends T> col,
@NotNull T... elements) {
Util.checkNotNull(col, elements);
List<T> newList = new ArrayList<T>(col.size() + elements.length);
for (T element : elements)
newList.add(element);
newList.addAll(col);
return newList;
}
@NotNull
public static <T> List<T> createEmptyList(@NotNull Collection<?>... cols) {
int size = 0;
for (int i = 0; i < cols.length; i++)
size += cols[i].size();
return new ArrayList<T>(size);
}
/**
* Runs the given {@code Runnable} in a way that avoids throwing errors of
* the type {@link SWT#ERROR_THREAD_INVALID_ACCESS}. This is useful for
* running GUI-accessing code from non-GUI threads.
* <p>
	 * The given Runnable is <b>not</b> run if the given widget is null or
* disposed. This helps avoid the common pitfall of trying to access widgets
* from a non-GUI thread when these widgets have already been disposed.
* <p>
* The returned Boolean indicates whether the Runnable was run (true) or not
* (false).
*/
public static boolean runSwtSafe( @Nullable final Widget widget,
@NotNull final Runnable runnable) {
if (Display.getCurrent() != null) {
boolean wasRun = widget != null && !widget.isDisposed();
if (wasRun)
runnable.run();
return wasRun;
}
else {
return runSyncExec(widget, runnable);
}
}
/**
* @see #runSwtSafe(Widget, Runnable)
*/
public static boolean runSwtSafe( @Nullable final Display display,
@NotNull final Runnable runnable) {
if (Display.getCurrent() != null) {
boolean wasRun = display != null && !display.isDisposed();
if (wasRun)
runnable.run();
return wasRun;
}
else {
return runSyncExec(display, runnable);
}
}
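	/*
	 * Illustrative usage sketch (added for clarification; not part of the
	 * original source): updating a label from a worker thread. The widget is
	 * only touched if it still exists when the runnable executes.
	 */
	@SuppressWarnings("unused")
	private static void runSwtSafeExample(final Label label, final String newText) {
		new Thread(new Runnable() {
			public void run() {
				// ... long-running work off the GUI thread ...
				runSwtSafe(label, new Runnable() {
					public void run() {
						label.setText(newText);
					}
				});
			}
		}).start();
	}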
/**
* Runs the given {@code Runnable} via {@link Display#syncExec(Runnable)}.
* This is useful for running GUI-accessing code from non-GUI threads.
* <p>
	 * The given Runnable is <b>not</b> run if the given widget is null or
* disposed. This helps avoid the common pitfall of trying to access widgets
* from a non-GUI thread when these widgets have already been disposed.
* <p>
* The returned Boolean indicates whether the Runnable was run (true) or not
* (false).
*/
public static boolean runSyncExec( @Nullable final Widget widget,
@NotNull final Runnable runnable) {
if (widget == null || widget.isDisposed())
return false;
final boolean[] wasRun = { false };
widget.getDisplay().syncExec(new Runnable() {
public void run() {
wasRun[0] = !widget.isDisposed();
if (wasRun[0])
runnable.run();
}
});
return wasRun[0];
}
/**
* @see #runSyncExec(Widget, Runnable)
*/
public static boolean runSyncExec( @Nullable final Display display,
@NotNull final Runnable runnable) {
if (display == null || display.isDisposed())
return false;
final boolean[] wasRun = { false };
display.syncExec(new Runnable() {
public void run() {
wasRun[0] = !display.isDisposed();
if (wasRun[0])
runnable.run();
}
});
return wasRun[0];
}
/**
* Runs the given {@code Runnable} via {@link Display#asyncExec(Runnable)}.
* This is useful for running GUI-accessing code from non-GUI threads.
* <p>
* The given Runnable is <b>not</b> run if the given widget is null or
* disposed. This helps avoid the common pitfall of trying to access widgets
* from a non-GUI thread when these widgets have already been disposed.
*/
public static void runAsyncExec(@Nullable final Widget widget,
@NotNull final Runnable runnable) {
/*
* Note: Unlike the syncExec variant, here it's not possible to return a
* boolean flag that indicates whether the Runnable was run, since
* asyncExec may not execute the Runnable immediately.
*/
if (widget == null || widget.isDisposed())
return;
widget.getDisplay().asyncExec(new Runnable() {
public void run() {
if (!widget.isDisposed())
runnable.run();
}
});
}
/**
* Runs the given {@code Runnable} via {@link Display#asyncExec(Runnable)}.
* This is useful for running GUI-accessing code from non-GUI threads.
* <p>
* The given Runnable is <b>not</b> run if the given display is null or
* disposed. This helps avoid the common pitfall of trying to access widgets
* from a non-GUI thread when these widgets have already been disposed.
*/
public static void runAsyncExec(@Nullable final Display display,
@NotNull final Runnable runnable) {
/*
* Note: Unlike the syncExec variant, here it's not possible to return a
* boolean flag that indicates whether the Runnable was run, since
* asyncExec may not execute the Runnable immediately.
*/
if (display == null || display.isDisposed())
return;
display.asyncExec(new Runnable() {
public void run() {
if (!display.isDisposed())
runnable.run();
}
});
}
/**
* Launches the given filename or filepath, and returns whether the file was
* successfully launched. This method first tries to launch the file via the
* SWT method {@link Program#launch(String)}. If this fails and the
* application is running on Linux, this method tries to call xdg-open.
*/
public static boolean launch(@NotNull String filename) {
Util.checkNotNull(filename);
if (Program.launch(filename))
return true;
if (!IS_LINUX)
return false;
try {
String[] cmd = {"xdg-open", filename};
Process process = Runtime.getRuntime().exec(cmd);
int exitValue = process.waitFor();
return exitValue == 0;
}
catch (Exception e) {
return false;
}
}
/**
* @see #launch(String)
*/
public static boolean launch(@NotNull File fileOrDir) {
Util.checkNotNull(fileOrDir);
return launch(getSystemAbsPath(fileOrDir));
}
@NotNull
public static File createTempDir() throws IOException {
File dir = Files.createTempDir();
/*
* On Windows and Mac OS X, returning a canonical file avoids certain
* symlink-related issues:
*
* On Windows 7, Files.createTempDir() might return a directory that
* contains 8.3 filenames, for which the absolute and canonical paths
* differ, so that the directory will be incorrectly treated as a
* symlink. On Mac OS X, Files.createTempDir() will return an actual
* symlink.
*
* In both cases, returning a file that is or appears to be a symlink
* will lead to various problems, e.g. Files.deleteRecursively(File)
* failing to delete the temporary directory.
*/
return IS_WINDOWS || IS_MAC_OS_X ? dir.getCanonicalFile() : dir;
}
/**
* Equivalent to {@link #createTempFile(String, String, File)
* createTempFile(String, String, null)}.
*/
public static File createTempFile( @NotNull String prefix,
@Nullable String suffix)
throws IOException {
return createTempFile(prefix, suffix, null);
}
/**
* Equivalent to {@link File#createTempFile(String, String, File)}, except:
* <ul>
* <li>The returned file will be deleted automatically after JVM shutdown.
* <li>Unlike {@link File#createTempFile(String, String, File)}, this method
* will not throw an exception if the prefix is shorter than 3 characters.
* Instead, the prefix will be right-padded with underscores to make it 3
* characters long.
* </ul>
*
* @see {@link File#createTempFile(String, String, File)}
*/
public static File createTempFile( @NotNull String prefix,
@Nullable String suffix,
@Nullable File directory)
throws IOException {
int prefixLength = prefix.length();
if (prefixLength < 3)
prefix += Strings.repeat("_", 3 - prefixLength);
File file = File.createTempFile(prefix, suffix, directory);
/*
* On Mac OS X, File.createTempFile() will give us a symlink to a file,
* which is not what we want, because our file walker will silently
* ignore symlinks. The workaround is to return a canonical file on Mac
* OS X.
*/
if (Util.IS_MAC_OS_X)
file = file.getCanonicalFile();
file.deleteOnExit();
return file;
}
@NotNull
public static File createDerivedTempFile( @NotNull String filename,
@NotNull File tempDir)
throws IOException {
String[] nameParts = Util.splitFilename(filename);
if (! nameParts[1].equals(""))
nameParts[1] = "." + nameParts[1];
return Util.createTempFile(
nameParts[0], nameParts[1], tempDir
);
}
public static void println(@NotNull Object... objects) {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (Object object : objects) {
if (first) {
first = false;
} else {
sb.append("; ");
}
sb.append(object);
}
System.out.println(sb.toString());
}
/**
* Equivalent to <code>System.err.println(String)</code>. This method can be
* called instead to suppress AspectJ warnings.
*/
public static void printErr(@NotNull String message) {
System.err.println(message);
}
/**
* Equivalent to {@link Throwable#printStackTrace()}. This method can be
* called instead to suppress AspectJ warnings.
*/
public static void printErr(@NotNull Throwable t) {
t.printStackTrace();
}
@NotNull
public static String getLowestMessage(@Nullable Throwable throwable) {
if (throwable == null)
return "";
List<Throwable> chain = Throwables.getCausalChain(throwable);
for (Throwable t : Lists.reverse(chain)) {
String msg = t.getMessage();
if (msg != null && !msg.trim().equals(""))
return msg;
}
return "";
}
/**
* Applying this method to the given widget will cause all the text in it to
* become selected if the user clicks on it after coming back from another
* part of the GUI or another program. The widget must be a Combo or a Text
* widget.
*/
public static void selectAllOnFocus(@NotNull final Control text) {
Util.checkThat(text instanceof Combo || text instanceof Text || text instanceof StyledText);
class SelectAllOnFocus extends MouseAdapter implements FocusListener {
private boolean focusGained = false;
public void focusGained(FocusEvent e) {
focusGained = true;
}
public void focusLost(FocusEvent e) {
}
public void mouseDown(MouseEvent e) {
if (! focusGained) return;
if (text instanceof Combo)
selectAll((Combo) text);
else if (text instanceof Text)
((Text) text).selectAll();
else if (text instanceof StyledText)
((StyledText) text).selectAll();
focusGained = false;
}
}
SelectAllOnFocus listener = new SelectAllOnFocus();
text.addFocusListener(listener);
text.addMouseListener(listener);
}
@Nullable private static KeyListener selectAllKeyListener;
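	/**
	 * Registers a key listener on the given StyledText that selects all of its
	 * text when Ctrl+A (Cmd+A on Mac OS X) is pressed.
	 */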
public static void registerSelectAllKey(@NotNull final StyledText st) {
if (selectAllKeyListener == null) {
selectAllKeyListener = new KeyAdapter() {
public void keyPressed(org.eclipse.swt.events.KeyEvent e) {
				if (e.stateMask == SWT.MOD1 && e.keyCode == 'a')
st.selectAll();
}
};
}
st.addKeyListener(selectAllKeyListener);
}
/**
* Selects all the text in the given combo.
*/
public static void selectAll(@NotNull Combo combo) {
int length = combo.getText().length();
combo.setSelection(new Point(0, length));
}
public static int clamp(int value, int minimum, int maximum) {
Util.checkThat(minimum <= maximum);
if (value > maximum) return maximum;
if (value < minimum) return minimum;
return value;
}
public static boolean isInterrupted() {
return Thread.currentThread().isInterrupted();
}
/**
* Returns an array of files from the system clipboard, or null if there are
* no files on the clipboard. This method should not be called from a
* non-GUI thread, and it should not be called before an SWT display has
* been created.
*/
@Nullable
public static List<File> getFilesFromClipboard() {
assertSwtThread();
Clipboard clipboard = new Clipboard(Display.getDefault());
try {
TransferData[] types = clipboard.getAvailableTypes();
for (TransferData type : types) {
if (!FileTransfer.getInstance().isSupportedType(type))
continue;
Object data = clipboard.getContents(FileTransfer.getInstance());
if (data == null || !(data instanceof String[]))
continue;
String[] paths = (String[]) data;
List<File> files = new ArrayList<File>(paths.length);
for (String path : paths)
files.add(new File(path));
return files;
}
return null;
}
finally {
clipboard.dispose();
}
}
public static void setClipboard(@NotNull String text) {
Util.checkNotNull(text);
Clipboard clipboard = new Clipboard(Display.getCurrent());
Transfer[] types = new Transfer[] { TextTransfer.getInstance() };
clipboard.setContents(new Object[] {text}, types);
clipboard.dispose();
}
/**
	 * Replaces the contents of the system clipboard with the given files,
	 * offering them both as a text transfer and as a file transfer. This will
	 * only work if an SWT Display has been created.
*/
public static void setClipboard(@NotNull Collection<File> files) {
Util.checkNotNull(files);
if (files.isEmpty())
return;
Clipboard clipboard = new Clipboard(Display.getCurrent());
Transfer[] types = new Transfer[] {
TextTransfer.getInstance(),
FileTransfer.getInstance()
};
StringBuilder sb = new StringBuilder();
String[] filePaths = new String[files.size()];
int i = 0;
for (File file : files) {
if (i != 0)
sb.append("\n");
String path = Util.getSystemAbsPath(file);
sb.append(path);
filePaths[i] = path;
i++;
}
clipboard.setContents(new Object[] {sb.toString(), filePaths}, types);
clipboard.dispose();
}
/**
* Adds a {@link MouseTrackListener} to the given control that highlights
* the background when the mouse hovers over the control.
*/
public static void addMouseHighlighter(@NotNull final Control control) {
control.addMouseTrackListener(new MouseTrackAdapter() {
public void mouseEnter(MouseEvent e) {
control.setBackground(Col.WIDGET_HIGHLIGHT_SHADOW.get());
}
public void mouseExit(MouseEvent e) {
control.setBackground(null);
}
});
}
/**
* Returns whether the given key code represents the Enter key, which can be
* either the 'normal' Enter key or the Enter key on the numpad.
*/
public static boolean isEnterKey(int keyCode){
return keyCode == SWT.CR || keyCode == SWT.KEYPAD_CR;
}
/**
* Returns whether the given stateMask is a special function key.
*/
public static boolean isSpecialStateMask(int stateMask){
return (stateMask & (SWT.CTRL | SWT.ALT | SWT.SHIFT | SWT.COMMAND)) != 0;
}
/**
* Returns whether the given keyCode is a character or a number.
*/
public static boolean isAlphaNumeric(int keyCode){
return (keyCode >=97 && keyCode <=122) || (keyCode >=48 && keyCode <=57);
}
/**
* Returns whether the given keyCode is a valid key to trigger type-ahead search.
*/
public static boolean isTypeAheadAccept(char character){
return character>=32 || character == SWT.BS ;
}
// Any of the given resources may be null
public static void disposeWith( @NotNull Widget widget,
@NotNull final Resource... resources) {
Util.checkNotNull(widget, resources);
widget.addDisposeListener(new DisposeListener() {
public void widgetDisposed(DisposeEvent e) {
for (Resource resource : resources)
if (resource != null)
resource.dispose();
}
});
}
/**
	 * Launches Windows Explorer with the given filepath selected, e.g.
	 * explorer.exe /select,F:\docfetcher\DocFetcher\aspectjtools.jar
	 * and returns whether the launch was successful.
	 * This does not work properly on Windows 7 64-bit.
*/
public static boolean winOpenDir(String fileName) {
if (IS_WINDOWS) {
try {
Runtime.getRuntime().exec("explorer /select, " + fileName);
return true;
} catch (Exception e) {
return false;
}
}
return false;
}
/**
* Return the original index name, which is based on the
* root folder of the index
*/
public static String getDefaultIndexName(File rootFile){
String nameOrLetter = Util.getNameOrLetter(rootFile, ":\\");
return (Util.truncate(nameOrLetter));
}
}
|
[
"\"KDE_FULL_SESSION\"",
"\"DESKTOP_SESSION\"",
"\"GDMSESSION\"",
"\"XDG_CURRENT_DESKTOP\""
] |
[] |
[
"DESKTOP_SESSION",
"GDMSESSION",
"KDE_FULL_SESSION",
"XDG_CURRENT_DESKTOP"
] |
[]
|
["DESKTOP_SESSION", "GDMSESSION", "KDE_FULL_SESSION", "XDG_CURRENT_DESKTOP"]
|
java
| 4 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Emall.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
DjangoRestApisPSQL/DjangoRestApisPSQL/asgi.py
|
"""
ASGI config for DjangoRestApisPSQL project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRestApisPSQL.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/test_match.py
|
import os
from serum import Context, match
from serum.exceptions import UnknownEnvironment
import pytest
@pytest.fixture()
def environ():
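    """Yield os.environ and remove TEST_ENV afterwards so each test starts clean."""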
yield os.environ
os.environ.pop('TEST_ENV', None)
def test_match_returns_correct_env(environ):
env1 = Context()
env2 = Context()
environ['TEST_ENV'] = 'ENV1'
env = match(environment_variable='TEST_ENV', ENV1=env1, ENV2=env2)
assert env is env1
environ['TEST_ENV'] = 'ENV2'
env = match(environment_variable='TEST_ENV', ENV1=env1, ENV2=env2)
assert env is env2
def test_match_gets_default():
default = Context()
env1 = Context()
env = match(environment_variable='TEST_ENV', default=default, ENV1=env1)
assert env is default
def test_match_fails_when_no_default_and_no_env():
env1 = Context()
with pytest.raises(UnknownEnvironment):
match(environment_variable='TEST_ENV', env1=env1)
def test_match_fails_with_unknown_environment(environ):
environ['TEST_ENV'] = 'unknown'
with pytest.raises(UnknownEnvironment):
match(environment_variable='TEST_ENV')
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
infra/conf/rule/rule_test.go
|
package rule_test
import (
"context"
"errors"
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/v2fly/v2ray-core/v5/common"
"github.com/v2fly/v2ray-core/v5/common/platform"
"github.com/v2fly/v2ray-core/v5/common/platform/filesystem"
"github.com/v2fly/v2ray-core/v5/infra/conf/cfgcommon"
"github.com/v2fly/v2ray-core/v5/infra/conf/geodata"
_ "github.com/v2fly/v2ray-core/v5/infra/conf/geodata/standard"
"github.com/v2fly/v2ray-core/v5/infra/conf/rule"
)
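// init points the v2ray.location.asset override at testing/temp and downloads
// geoip.dat into that directory if it is not already present.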
func init() {
const geoipURL = "https://raw.githubusercontent.com/v2fly/geoip/release/geoip.dat"
wd, err := os.Getwd()
common.Must(err)
tempPath := filepath.Join(wd, "..", "..", "..", "testing", "temp")
geoipPath := filepath.Join(tempPath, "geoip.dat")
os.Setenv("v2ray.location.asset", tempPath)
if _, err := os.Stat(geoipPath); err != nil && errors.Is(err, fs.ErrNotExist) {
common.Must(os.MkdirAll(tempPath, 0o755))
geoipBytes, err := common.FetchHTTPContent(geoipURL)
common.Must(err)
common.Must(filesystem.WriteFile(geoipPath, geoipBytes))
}
}
func TestToCidrList(t *testing.T) {
t.Log(os.Getenv("v2ray.location.asset"))
common.Must(filesystem.CopyFile(platform.GetAssetLocation("geoiptestrouter.dat"), platform.GetAssetLocation("geoip.dat")))
ips := cfgcommon.StringList([]string{
"geoip:us",
"geoip:cn",
"geoip:!cn",
"ext:geoiptestrouter.dat:!cn",
"ext:geoiptestrouter.dat:ca",
"ext-ip:geoiptestrouter.dat:!cn",
"ext-ip:geoiptestrouter.dat:!ca",
})
cfgctx := cfgcommon.NewConfigureLoadingContext(context.Background())
if loader, err := geodata.GetGeoDataLoader("standard"); err == nil {
cfgcommon.SetGeoDataLoader(cfgctx, loader)
} else {
t.Fatal(err)
}
_, err := rule.ToCidrList(cfgctx, ips)
if err != nil {
t.Fatalf("Failed to parse geoip list, got %s", err)
}
}
|
[
"\"v2ray.location.asset\""
] |
[] |
[
"v2ray.location.asset"
] |
[]
|
["v2ray.location.asset"]
|
go
| 1 | 0 | |
bin/pherf-cluster.py
|
#!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
# This script is intended for use where HBase/Phoenix is loaded from HBase classpath
# therefore HBASE_DIR environment variable needs to be configured for this script to execute
import os
import subprocess
import sys
import phoenix_utils
phoenix_utils.setPath()
args = phoenix_utils.shell_quote(sys.argv[1:])
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
hbase_path = os.getenv('HBASE_DIR')
java_home = os.getenv('JAVA_HOME')
# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
sys.exit(-1)
hbase_env = {}
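# Source (or call) the hbase-env script and parse the resulting environment
# into key/value pairs so settings such as JAVA_HOME can be picked up.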
if os.path.isfile(hbase_env_path):
p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
for x in p.stdout:
(k, _, v) = x.partition('=')
hbase_env[k.strip()] = v.strip()
if hbase_env.has_key('JAVA_HOME'):
java_home = hbase_env['JAVA_HOME']
if java_home:
java = os.path.join(java_home, 'bin', 'java')
else:
java = 'java'
print "HBASE_DIR environment variable is currently set to: " + hbase_path
# Get the HBase classpath
hbasecp, stderr = subprocess.Popen(hbase_path + "/bin/hbase classpath",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
java_cmd = java +' -cp "' + hbasecp + os.pathsep + phoenix_utils.pherf_conf_path + os.pathsep + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_pherf_jar + \
'" -Dlog4j.configuration=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" org.apache.phoenix.pherf.Pherf " + args
os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
|
[] |
[] |
[
"JAVA_HOME",
"HBASE_CONF_DIR",
"HBASE_DIR"
] |
[]
|
["JAVA_HOME", "HBASE_CONF_DIR", "HBASE_DIR"]
|
python
| 3 | 0 | |
cni-plugin/win_tests/calico_cni_windows_suite_test.go
|
// Copyright (c) 2018-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main_windows_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/projectcalico/calico/libcalico-go/lib/testutils"
"os"
"testing"
"github.com/onsi/ginkgo/reporters"
)
func init() {
testutils.HookLogrusForGinkgo()
}
func TestCalicoCni(t *testing.T) {
RegisterFailHandler(Fail)
reportPath := os.Getenv("REPORT")
if reportPath == "" {
// Default the report path if not specified.
reportPath = "../report/windows_suite.xml"
}
junitReporter := reporters.NewJUnitReporter(reportPath)
RunSpecsWithDefaultAndCustomReporters(t, "CNI suite (Windows)", []Reporter{junitReporter})
}
|
[
"\"REPORT\""
] |
[] |
[
"REPORT"
] |
[]
|
["REPORT"]
|
go
| 1 | 0 | |
altong_project/wsgi.py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'altong_project.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pipenv/project.py
|
# -*- coding: utf-8 -*-
import base64
import fnmatch
import glob
import hashlib
import io
import json
import operator
import os
import re
import sys
import six
import toml
import tomlkit
import vistir
import pipfile
import pipfile.api
from .vendor.cached_property import cached_property
from .cmdparse import Script
from .environment import Environment
from .environments import (
PIPENV_DEFAULT_PYTHON_VERSION, PIPENV_IGNORE_VIRTUALENVS, PIPENV_MAX_DEPTH,
PIPENV_PIPFILE, PIPENV_PYTHON, PIPENV_TEST_INDEX, PIPENV_VENV_IN_PROJECT,
PIPENV_USE_SYSTEM, is_in_virtualenv, is_type_checking
)
from .vendor.requirementslib.models.utils import get_default_pyproject_backend
from .utils import (
cleanup_toml, convert_toml_outline_tables, find_requirements,
get_canonical_names, get_url_name, get_workon_home, is_editable,
is_installable_file, is_star, is_valid_url, is_virtual_environment,
looks_like_dir, normalize_drive, pep423_name, proper_case, python_version,
safe_expandvars, get_pipenv_dist
)
if is_type_checking():
from typing import Dict, Text, Union
TSource = Dict[Text, Union[Text, bool]]
def _normalized(p):
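    """Return an absolute path with symlinks resolved and, on Windows, proper drive/path casing."""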
if p is None:
return None
loc = vistir.compat.Path(p)
try:
loc = loc.resolve()
except OSError:
loc = loc.absolute()
# Recase the path properly on Windows. From https://stackoverflow.com/a/35229734/5043728
if os.name == 'nt':
matches = glob.glob(re.sub(r'([^:/\\])(?=[/\\]|$)', r'[\1]', str(loc)))
path_str = matches and matches[0] or str(loc)
else:
path_str = str(loc)
return normalize_drive(os.path.abspath(path_str))
DEFAULT_NEWLINES = u"\n"
class _LockFileEncoder(json.JSONEncoder):
"""A specilized JSON encoder to convert loaded TOML data into a lock file.
This adds a few characteristics to the encoder:
* The JSON is always prettified with indents and spaces.
* TOMLKit's container elements are seamlessly encodable.
* The output is always UTF-8-encoded text, never binary, even on Python 2.
"""
def __init__(self):
super(_LockFileEncoder, self).__init__(
indent=4, separators=(",", ": "), sort_keys=True
)
def default(self, obj):
if isinstance(obj, vistir.compat.Path):
obj = obj.as_posix()
return super(_LockFileEncoder, self).default(obj)
def encode(self, obj):
content = super(_LockFileEncoder, self).encode(obj)
if not isinstance(content, six.text_type):
content = content.decode("utf-8")
return content
def preferred_newlines(f):
if isinstance(f.newlines, six.text_type):
return f.newlines
return DEFAULT_NEWLINES
if PIPENV_PIPFILE:
if not os.path.isfile(PIPENV_PIPFILE):
raise RuntimeError("Given PIPENV_PIPFILE is not found!")
else:
PIPENV_PIPFILE = _normalized(PIPENV_PIPFILE)
# Overwrite environment variable so that subprocesses can get the correct path.
# See https://github.com/pypa/pipenv/issues/3584
os.environ['PIPENV_PIPFILE'] = PIPENV_PIPFILE
# (path, file contents) => TOMLFile
# keeps track of pipfiles that we've seen so we do not need to re-parse 'em
_pipfile_cache = {}
if PIPENV_TEST_INDEX:
DEFAULT_SOURCE = {
u"url": PIPENV_TEST_INDEX,
u"verify_ssl": True,
u"name": u"custom",
}
else:
DEFAULT_SOURCE = {
u"url": u"https://pypi.org/simple",
u"verify_ssl": True,
u"name": u"pypi",
}
pipfile.api.DEFAULT_SOURCE = DEFAULT_SOURCE
class SourceNotFound(KeyError):
pass
class Project(object):
"""docstring for Project"""
_lockfile_encoder = _LockFileEncoder()
def __init__(self, which=None, python_version=None, chdir=True):
super(Project, self).__init__()
self._name = None
self._virtualenv_location = None
self._download_location = None
self._proper_names_db_path = None
self._pipfile_location = None
self._pipfile_newlines = DEFAULT_NEWLINES
self._lockfile_newlines = DEFAULT_NEWLINES
self._requirements_location = None
self._original_dir = os.path.abspath(os.curdir)
self._environment = None
self._which = which
self._build_system = {
"requires": ["setuptools", "wheel"]
}
self.python_version = python_version
# Hack to skip this during pipenv run, or -r.
if ("run" not in sys.argv) and chdir:
try:
os.chdir(self.project_directory)
except (TypeError, AttributeError):
pass
def path_to(self, p):
"""Returns the absolute path to a given relative path."""
if os.path.isabs(p):
return p
return os.sep.join([self._original_dir, p])
def _build_package_list(self, package_section):
"""Returns a list of packages for pip-tools to consume."""
from pipenv.vendor.requirementslib.utils import is_vcs
ps = {}
# TODO: Separate the logic for showing packages from the filters for supplying pip-tools
for k, v in self.parsed_pipfile.get(package_section, {}).items():
# Skip editable VCS deps.
if hasattr(v, "keys"):
                # When a vcs url is given without editable it only appears as a key
# Eliminate any vcs, path, or url entries which are not editable
# Since pip-tools can't do deep resolution on them, even setuptools-installable ones
if (
is_vcs(v)
or is_vcs(k)
or (is_installable_file(k) or is_installable_file(v))
or any(
(
prefix in v
and (os.path.isfile(v[prefix]) or is_valid_url(v[prefix]))
)
for prefix in ["path", "file"]
)
):
# If they are editable, do resolve them
if "editable" not in v:
# allow wheels to be passed through
if not (
hasattr(v, "keys")
and v.get("path", v.get("file", "")).endswith(".whl")
):
continue
ps.update({k: v})
else:
ps.update({k: v})
else:
ps.update({k: v})
else:
# Since these entries have no attributes we know they are not editable
# So we can safely exclude things that need to be editable in order to be resolved
# First exclude anything that is a vcs entry either in the key or value
if not (
any(is_vcs(i) for i in [k, v])
# Then exclude any installable files that are not directories
# Because pip-tools can resolve setup.py for example
or any(is_installable_file(i) for i in [k, v])
# Then exclude any URLs because they need to be editable also
# Things that are excluded can only be 'shallow resolved'
or any(is_valid_url(i) for i in [k, v])
):
ps.update({k: v})
return ps
@property
def name(self):
if self._name is None:
self._name = self.pipfile_location.split(os.sep)[-2]
return self._name
@property
def pipfile_exists(self):
return os.path.isfile(self.pipfile_location)
@property
def required_python_version(self):
if self.pipfile_exists:
required = self.parsed_pipfile.get("requires", {}).get(
"python_full_version"
)
if not required:
required = self.parsed_pipfile.get("requires", {}).get("python_version")
if required != "*":
return required
@property
def project_directory(self):
return os.path.abspath(os.path.join(self.pipfile_location, os.pardir))
@property
def requirements_exists(self):
return bool(self.requirements_location)
def is_venv_in_project(self):
return PIPENV_VENV_IN_PROJECT or (
self.project_directory
and os.path.isdir(os.path.join(self.project_directory, ".venv"))
)
@property
def virtualenv_exists(self):
if os.path.exists(self.virtualenv_location):
if os.name == "nt":
extra = ["Scripts", "activate.bat"]
else:
extra = ["bin", "activate"]
return os.path.isfile(os.sep.join([self.virtualenv_location] + extra))
return False
def get_location_for_virtualenv(self):
# If there's no project yet, set location based on config.
if not self.project_directory:
if self.is_venv_in_project():
return os.path.abspath(".venv")
return str(get_workon_home().joinpath(self.virtualenv_name))
dot_venv = os.path.join(self.project_directory, ".venv")
# If there's no .venv in project root, set location based on config.
if not os.path.exists(dot_venv):
if self.is_venv_in_project():
return dot_venv
return str(get_workon_home().joinpath(self.virtualenv_name))
# If .venv in project root is a directory, use it.
if os.path.isdir(dot_venv):
return dot_venv
# Now we assume .venv in project root is a file. Use its content.
with io.open(dot_venv) as f:
name = f.read().strip()
# If content looks like a path, use it as a relative path.
# Otherwise use directory named after content in WORKON_HOME.
if looks_like_dir(name):
path = vistir.compat.Path(self.project_directory, name)
return path.absolute().as_posix()
return str(get_workon_home().joinpath(name))
@property
def working_set(self):
from .utils import load_path
sys_path = load_path(self.which("python"))
import pkg_resources
return pkg_resources.WorkingSet(sys_path)
@property
def installed_packages(self):
return self.environment.get_installed_packages()
@property
def installed_package_names(self):
return get_canonical_names([pkg.key for pkg in self.installed_packages])
@property
def lockfile_package_names(self):
dev_keys = get_canonical_names(self.lockfile_content["develop"].keys())
default_keys = get_canonical_names(self.lockfile_content["default"].keys())
return {
"dev": dev_keys,
"default": default_keys,
"combined": dev_keys | default_keys
}
@property
def pipfile_package_names(self):
dev_keys = get_canonical_names(self.dev_packages.keys())
default_keys = get_canonical_names(self.packages.keys())
return {
"dev": dev_keys,
"default": default_keys,
"combined": dev_keys | default_keys
}
def get_environment(self, allow_global=False):
# type: (bool) -> Environment
is_venv = is_in_virtualenv()
if allow_global and not is_venv:
prefix = sys.prefix
else:
prefix = self.virtualenv_location
sources = self.sources if self.sources else [DEFAULT_SOURCE]
environment = Environment(
prefix=prefix, is_venv=is_venv, sources=sources, pipfile=self.parsed_pipfile,
project=self
)
pipenv_dist = get_pipenv_dist(pkg="pipenv")
if pipenv_dist:
environment.extend_dists(pipenv_dist)
else:
environment.add_dist("pipenv")
return environment
@property
def environment(self):
if not self._environment:
allow_global = os.environ.get("PIPENV_USE_SYSTEM", PIPENV_USE_SYSTEM)
self._environment = self.get_environment(allow_global=allow_global)
return self._environment
def get_outdated_packages(self):
return self.environment.get_outdated_packages(pre=self.pipfile.get("pre", False))
@classmethod
def _sanitize(cls, name):
        # Replace dangerous characters with '_'. The length of the sanitized
        # project name is limited to 42 because of a limit in the Linux kernel
#
# 42 = 127 - len('/home//.local/share/virtualenvs//bin/python2') - 32 - len('-HASHHASH')
#
# 127 : BINPRM_BUF_SIZE - 1
# 32 : Maximum length of username
#
# References:
# https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
# http://www.tldp.org/LDP/abs/html/special-chars.html#FIELDREF
# https://github.com/torvalds/linux/blob/2bfe01ef/include/uapi/linux/binfmts.h#L18
return re.sub(r'[ $`!*@"\\\r\n\t]', "_", name)[0:42]
def _get_virtualenv_hash(self, name):
"""Get the name of the virtualenv adjusted for windows if needed
Returns (name, encoded_hash)
"""
def get_name(name, location):
name = self._sanitize(name)
hash = hashlib.sha256(location.encode()).digest()[:6]
encoded_hash = base64.urlsafe_b64encode(hash).decode()
return name, encoded_hash[:8]
clean_name, encoded_hash = get_name(name, self.pipfile_location)
venv_name = "{0}-{1}".format(clean_name, encoded_hash)
# This should work most of the time for
# Case-sensitive filesystems,
# In-project venv
# "Proper" path casing (on non-case-sensitive filesystems).
if (
not fnmatch.fnmatch("A", "a")
or self.is_venv_in_project()
or get_workon_home().joinpath(venv_name).exists()
):
return clean_name, encoded_hash
# Check for different capitalization of the same project.
for path in get_workon_home().iterdir():
if not is_virtual_environment(path):
continue
try:
env_name, hash_ = path.name.rsplit("-", 1)
except ValueError:
continue
if len(hash_) != 8 or env_name.lower() != name.lower():
continue
return get_name(env_name, self.pipfile_location.replace(name, env_name))
# Use the default if no matching env exists.
return clean_name, encoded_hash
@property
def virtualenv_name(self):
sanitized, encoded_hash = self._get_virtualenv_hash(self.name)
suffix = "-{0}".format(PIPENV_PYTHON) if PIPENV_PYTHON else ""
# If the pipfile was located at '/home/user/MY_PROJECT/Pipfile',
# the name of its virtualenv will be 'my-project-wyUfYPqE'
return sanitized + "-" + encoded_hash + suffix
@property
def virtualenv_location(self):
# if VIRTUAL_ENV is set, use that.
virtualenv_env = os.getenv("VIRTUAL_ENV")
if (
"PIPENV_ACTIVE" not in os.environ
and not PIPENV_IGNORE_VIRTUALENVS and virtualenv_env
):
return virtualenv_env
if not self._virtualenv_location: # Use cached version, if available.
assert self.project_directory, "project not created"
self._virtualenv_location = self.get_location_for_virtualenv()
return self._virtualenv_location
@property
def virtualenv_src_location(self):
if self.virtualenv_location:
loc = os.sep.join([self.virtualenv_location, "src"])
else:
loc = os.sep.join([self.project_directory, "src"])
vistir.path.mkdir_p(loc)
return loc
@property
def download_location(self):
if self._download_location is None:
loc = os.sep.join([self.virtualenv_location, "downloads"])
self._download_location = loc
# Create the directory, if it doesn't exist.
vistir.path.mkdir_p(self._download_location)
return self._download_location
@property
def proper_names_db_path(self):
if self._proper_names_db_path is None:
self._proper_names_db_path = vistir.compat.Path(
self.virtualenv_location, "pipenv-proper-names.txt"
)
self._proper_names_db_path.touch() # Ensure the file exists.
return self._proper_names_db_path
@property
def proper_names(self):
with self.proper_names_db_path.open() as f:
return f.read().splitlines()
def register_proper_name(self, name):
"""Registers a proper name to the database."""
with self.proper_names_db_path.open("a") as f:
f.write(u"{0}\n".format(name))
@property
def pipfile_location(self):
if PIPENV_PIPFILE:
return PIPENV_PIPFILE
if self._pipfile_location is None:
try:
loc = pipfile.Pipfile.find(max_depth=PIPENV_MAX_DEPTH)
except RuntimeError:
loc = "Pipfile"
self._pipfile_location = _normalized(loc)
return self._pipfile_location
@property
def requirements_location(self):
if self._requirements_location is None:
try:
loc = find_requirements(max_depth=PIPENV_MAX_DEPTH)
except RuntimeError:
loc = None
self._requirements_location = loc
return self._requirements_location
@property
def parsed_pipfile(self):
"""Parse Pipfile into a TOMLFile and cache it
(call clear_pipfile_cache() afterwards if mutating)"""
contents = self.read_pipfile()
# use full contents to get around str/bytes 2/3 issues
cache_key = (self.pipfile_location, contents)
if cache_key not in _pipfile_cache:
parsed = self._parse_pipfile(contents)
_pipfile_cache[cache_key] = parsed
return _pipfile_cache[cache_key]
def read_pipfile(self):
# Open the pipfile, read it into memory.
if not self.pipfile_exists:
return ""
with io.open(self.pipfile_location) as f:
contents = f.read()
self._pipfile_newlines = preferred_newlines(f)
return contents
def clear_pipfile_cache(self):
"""Clear pipfile cache (e.g., so we can mutate parsed pipfile)"""
_pipfile_cache.clear()
def _parse_pipfile(self, contents):
try:
return tomlkit.parse(contents)
except Exception:
            # We lose comments here, but it's for the best.
# Fallback to toml parser, for large files.
return toml.loads(contents)
def _read_pyproject(self):
pyproject = self.path_to("pyproject.toml")
if os.path.exists(pyproject):
self._pyproject = toml.load(pyproject)
build_system = self._pyproject.get("build-system", None)
if not os.path.exists(self.path_to("setup.py")):
if not build_system or not build_system.get("requires"):
build_system = {
"requires": ["setuptools>=40.8.0", "wheel"],
"build-backend": get_default_pyproject_backend(),
}
self._build_system = build_system
@property
def build_requires(self):
return self._build_system.get("requires", ["setuptools>=40.8.0", "wheel"])
@property
def build_backend(self):
return self._build_system.get("build-backend", get_default_pyproject_backend())
@property
def settings(self):
"""A dictionary of the settings added to the Pipfile."""
return self.parsed_pipfile.get("pipenv", {})
def has_script(self, name):
try:
return name in self.parsed_pipfile["scripts"]
except KeyError:
return False
def build_script(self, name, extra_args=None):
try:
script = Script.parse(self.parsed_pipfile["scripts"][name])
except KeyError:
script = Script(name)
if extra_args:
script.extend(extra_args)
return script
def update_settings(self, d):
settings = self.settings
changed = False
for new in d:
if new not in settings:
settings[new] = d[new]
changed = True
if changed:
p = self.parsed_pipfile
p["pipenv"] = settings
# Write the changes to disk.
self.write_toml(p)
@property
def _lockfile(self):
"""Pipfile.lock divided by PyPI and external dependencies."""
pfile = pipfile.load(self.pipfile_location, inject_env=False)
lockfile = json.loads(pfile.lock())
for section in ("default", "develop"):
lock_section = lockfile.get(section, {})
for key in list(lock_section.keys()):
norm_key = pep423_name(key)
lockfile[section][norm_key] = lock_section.pop(key)
return lockfile
@property
def _pipfile(self):
from .vendor.requirementslib.models.pipfile import Pipfile as ReqLibPipfile
pf = ReqLibPipfile.load(self.pipfile_location)
return pf
@property
def lockfile_location(self):
return "{0}.lock".format(self.pipfile_location)
@property
def lockfile_exists(self):
return os.path.isfile(self.lockfile_location)
@property
def lockfile_content(self):
return self.load_lockfile()
def _get_editable_packages(self, dev=False):
section = "dev-packages" if dev else "packages"
packages = {
k: v
for k, v in self.parsed_pipfile.get(section, {}).items()
if is_editable(k) or is_editable(v)
}
return packages
def _get_vcs_packages(self, dev=False):
from pipenv.vendor.requirementslib.utils import is_vcs
section = "dev-packages" if dev else "packages"
packages = {
k: v
for k, v in self.parsed_pipfile.get(section, {}).items()
if is_vcs(v) or is_vcs(k)
}
return packages or {}
@property
def editable_packages(self):
return self._get_editable_packages(dev=False)
@property
def editable_dev_packages(self):
return self._get_editable_packages(dev=True)
@property
def vcs_packages(self):
"""Returns a list of VCS packages, for not pip-tools to consume."""
return self._get_vcs_packages(dev=False)
@property
def vcs_dev_packages(self):
"""Returns a list of VCS packages, for not pip-tools to consume."""
return self._get_vcs_packages(dev=True)
@property
def all_packages(self):
"""Returns a list of all packages."""
p = dict(self.parsed_pipfile.get("dev-packages", {}))
p.update(self.parsed_pipfile.get("packages", {}))
return p
@property
def packages(self):
"""Returns a list of packages, for pip-tools to consume."""
return self._build_package_list("packages")
@property
def dev_packages(self):
"""Returns a list of dev-packages, for pip-tools to consume."""
return self._build_package_list("dev-packages")
@property
def pipfile_is_empty(self):
if not self.pipfile_exists:
return True
if not len(self.read_pipfile()):
return True
return False
def create_pipfile(self, python=None):
"""Creates the Pipfile, filled with juicy defaults."""
from .vendor.pip_shims.shims import InstallCommand
# Inherit the pip's index configuration of install command.
command = InstallCommand()
indexes = command.cmd_opts.get_option("--extra-index-url").default
sources = [DEFAULT_SOURCE]
for i, index in enumerate(indexes):
if not index:
continue
source_name = "pip_index_{}".format(i)
verify_ssl = index.startswith("https")
sources.append(
{u"url": index, u"verify_ssl": verify_ssl, u"name": source_name}
)
data = {
u"source": sources,
# Default packages.
u"packages": {},
u"dev-packages": {},
}
# Default requires.
required_python = python
if not python:
if self.virtualenv_location:
required_python = self.which("python", self.virtualenv_location)
else:
required_python = self.which("python")
version = python_version(required_python) or PIPENV_DEFAULT_PYTHON_VERSION
if version and len(version) >= 3:
data[u"requires"] = {"python_version": version[: len("2.7")]}
self.write_toml(data)
@classmethod
def populate_source(cls, source):
"""Derive missing values of source from the existing fields."""
        # Only the URL parameter is mandatory; let the KeyError be thrown.
if "name" not in source:
source["name"] = get_url_name(source["url"])
if "verify_ssl" not in source:
source["verify_ssl"] = "https://" in source["url"]
if not isinstance(source["verify_ssl"], bool):
source["verify_ssl"] = str(source["verify_ssl"]).lower() == "true"
return source
def get_or_create_lockfile(self, from_pipfile=False):
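        """Return a requirementslib Lockfile, built from the Pipfile or from existing lockfile content as needed."""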
from pipenv.vendor.requirementslib.models.lockfile import Lockfile as Req_Lockfile
lockfile = None
if from_pipfile and self.pipfile_exists:
lockfile_dict = {
"default": self._lockfile["default"].copy(),
"develop": self._lockfile["develop"].copy()
}
lockfile_dict.update({"_meta": self.get_lockfile_meta()})
lockfile = Req_Lockfile.from_data(
path=self.lockfile_location, data=lockfile_dict, meta_from_project=False
)
elif self.lockfile_exists:
try:
lockfile = Req_Lockfile.load(self.lockfile_location)
except OSError:
lockfile = Req_Lockfile.from_data(self.lockfile_location, self.lockfile_content)
else:
lockfile = Req_Lockfile.from_data(path=self.lockfile_location, data=self._lockfile, meta_from_project=False)
if lockfile._lockfile is not None:
return lockfile
if self.lockfile_exists and self.lockfile_content:
lockfile_dict = self.lockfile_content.copy()
sources = lockfile_dict.get("_meta", {}).get("sources", [])
if not sources:
sources = self.pipfile_sources
elif not isinstance(sources, list):
sources = [sources]
lockfile_dict["_meta"]["sources"] = [
self.populate_source(s) for s in sources
]
_created_lockfile = Req_Lockfile.from_data(
path=self.lockfile_location, data=lockfile_dict, meta_from_project=False
)
lockfile._lockfile = lockfile.projectfile.model = _created_lockfile
return lockfile
else:
return self.get_or_create_lockfile(from_pipfile=True)
def get_lockfile_meta(self):
from .vendor.plette.lockfiles import PIPFILE_SPEC_CURRENT
if self.lockfile_exists:
sources = self.lockfile_content.get("_meta", {}).get("sources", [])
else:
sources = [dict(source) for source in self.parsed_pipfile["source"]]
if not isinstance(sources, list):
sources = [sources]
return {
"hash": {"sha256": self.calculate_pipfile_hash()},
"pipfile-spec": PIPFILE_SPEC_CURRENT,
"sources": [self.populate_source(s) for s in sources],
"requires": self.parsed_pipfile.get("requires", {})
}
def write_toml(self, data, path=None):
"""Writes the given data structure out as TOML."""
if path is None:
path = self.pipfile_location
data = convert_toml_outline_tables(data)
try:
formatted_data = tomlkit.dumps(data).rstrip()
except Exception:
document = tomlkit.document()
for section in ("packages", "dev-packages"):
document[section] = tomlkit.table()
# Convert things to inline tables — fancy :)
for package in data.get(section, {}):
if hasattr(data[section][package], "keys"):
table = tomlkit.inline_table()
table.update(data[section][package])
document[section][package] = table
else:
document[section][package] = tomlkit.string(data[section][package])
formatted_data = tomlkit.dumps(document).rstrip()
if (
vistir.compat.Path(path).absolute()
== vistir.compat.Path(self.pipfile_location).absolute()
):
newlines = self._pipfile_newlines
else:
newlines = DEFAULT_NEWLINES
formatted_data = cleanup_toml(formatted_data)
with io.open(path, "w", newline=newlines) as f:
f.write(formatted_data)
# pipfile is mutated!
self.clear_pipfile_cache()
def write_lockfile(self, content):
"""Write out the lockfile.
"""
s = self._lockfile_encoder.encode(content)
open_kwargs = {"newline": self._lockfile_newlines, "encoding": "utf-8"}
with vistir.contextmanagers.atomic_open_for_write(
self.lockfile_location, **open_kwargs
) as f:
f.write(s)
# Write newline at end of document. GH-319.
# Only need '\n' here; the file object handles the rest.
if not s.endswith(u"\n"):
f.write(u"\n")
@property
def pipfile_sources(self):
if self.pipfile_is_empty or "source" not in self.parsed_pipfile:
return [DEFAULT_SOURCE]
# We need to make copies of the source info so we don't
# accidentally modify the cache. See #2100 where values are
# written after the os.path.expandvars() call.
return [
{k: safe_expandvars(v) for k, v in source.items()}
for source in self.parsed_pipfile["source"]
]
@property
def sources(self):
if self.lockfile_exists and hasattr(self.lockfile_content, "keys"):
meta_ = self.lockfile_content.get("_meta", {})
sources_ = meta_.get("sources")
if sources_:
return sources_
else:
return self.pipfile_sources
@property
def index_urls(self):
return [src.get("url") for src in self.sources]
def find_source(self, source):
"""
Given a source, find it.
source can be a url or an index name.
"""
if not is_valid_url(source):
try:
source = self.get_source(name=source)
except SourceNotFound:
source = self.get_source(url=source)
else:
source = self.get_source(url=source)
return source
def get_source(self, name=None, url=None, refresh=False):
from .utils import is_url_equal
def find_source(sources, name=None, url=None):
source = None
if name:
source = next(iter(
s for s in sources if "name" in s and s["name"] == name
), None)
elif url:
source = next(iter(
s for s in sources
if "url" in s and is_url_equal(url, s.get("url", ""))
), None)
if source is not None:
return source
sources = (self.sources, self.pipfile_sources)
if refresh:
self.clear_pipfile_cache()
sources = reversed(sources)
found = next(
iter(find_source(source, name=name, url=url) for source in sources), None
)
target = next(iter(t for t in (name, url) if t is not None))
if found is None:
raise SourceNotFound(target)
return found
def get_package_name_in_pipfile(self, package_name, dev=False):
"""Get the equivalent package name in pipfile"""
key = "dev-packages" if dev else "packages"
section = self.parsed_pipfile.get(key, {})
package_name = pep423_name(package_name)
for name in section.keys():
if pep423_name(name) == package_name:
return name
return None
def remove_package_from_pipfile(self, package_name, dev=False):
# Read and append Pipfile.
name = self.get_package_name_in_pipfile(package_name, dev)
key = "dev-packages" if dev else "packages"
p = self.parsed_pipfile
if name:
del p[key][name]
self.write_toml(p)
def remove_packages_from_pipfile(self, packages):
parsed = self.parsed_pipfile
packages = set([pep423_name(pkg) for pkg in packages])
for section in ("dev-packages", "packages"):
pipfile_section = parsed.get(section, {})
pipfile_packages = set([
pep423_name(pkg_name) for pkg_name in pipfile_section.keys()
])
to_remove = packages & pipfile_packages
# The normal toml parser can't handle deleting packages with preceding newlines
is_dev = section == "dev-packages"
for pkg in to_remove:
pkg_name = self.get_package_name_in_pipfile(pkg, dev=is_dev)
del parsed[section][pkg_name]
self.write_toml(parsed)
def add_package_to_pipfile(self, package, dev=False):
from .vendor.requirementslib import Requirement
# Read and append Pipfile.
p = self.parsed_pipfile
# Don't re-capitalize file URLs or VCSs.
if not isinstance(package, Requirement):
package = Requirement.from_line(package.strip())
req_name, converted = package.pipfile_entry
key = "dev-packages" if dev else "packages"
# Set empty group if it doesn't exist yet.
if key not in p:
p[key] = {}
name = self.get_package_name_in_pipfile(req_name, dev)
if name and is_star(converted):
# Skip for wildcard version
return
# Add the package to the group.
p[key][name or pep423_name(req_name)] = converted
# Write Pipfile.
self.write_toml(p)
def src_name_from_url(self, index_url):
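        """Derive a short source name from the index URL's host, appending a random suffix if the name is already taken."""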
name, _, tld_guess = six.moves.urllib.parse.urlsplit(index_url).netloc.rpartition(
"."
)
src_name = name.replace(".", "")
try:
self.get_source(name=src_name)
except SourceNotFound:
name = src_name
else:
from random import randint
name = "{0}-{1}".format(src_name, randint(1, 1000))
return name
def add_index_to_pipfile(self, index, verify_ssl=True):
"""Adds a given index to the Pipfile."""
# Read and append Pipfile.
p = self.parsed_pipfile
try:
self.get_source(url=index)
except SourceNotFound:
source = {"url": index, "verify_ssl": verify_ssl}
else:
return
source["name"] = self.src_name_from_url(index)
# Add the package to the group.
if "source" not in p:
p["source"] = [source]
else:
p["source"].append(source)
# Write Pipfile.
self.write_toml(p)
def recase_pipfile(self):
if self.ensure_proper_casing():
self.write_toml(self.parsed_pipfile)
def load_lockfile(self, expand_env_vars=True):
with io.open(self.lockfile_location, encoding="utf-8") as lock:
j = json.load(lock)
self._lockfile_newlines = preferred_newlines(lock)
# lockfile is just a string
if not j or not hasattr(j, "keys"):
return j
if expand_env_vars:
# Expand environment variables in Pipfile.lock at runtime.
for i, source in enumerate(j["_meta"]["sources"][:]):
j["_meta"]["sources"][i]["url"] = os.path.expandvars(
j["_meta"]["sources"][i]["url"]
)
return j
def get_lockfile_hash(self):
if not os.path.exists(self.lockfile_location):
return
try:
lockfile = self.load_lockfile(expand_env_vars=False)
except ValueError:
# Lockfile corrupted
return ""
if "_meta" in lockfile and hasattr(lockfile, "keys"):
return lockfile["_meta"].get("hash", {}).get("sha256")
# Lockfile exists but has no hash at all
return ""
def calculate_pipfile_hash(self):
# Update the lockfile if it is out-of-date.
p = pipfile.load(self.pipfile_location, inject_env=False)
return p.hash
def ensure_proper_casing(self):
"""Ensures proper casing of Pipfile packages"""
pfile = self.parsed_pipfile
casing_changed = self.proper_case_section(pfile.get("packages", {}))
casing_changed |= self.proper_case_section(pfile.get("dev-packages", {}))
return casing_changed
def proper_case_section(self, section):
"""Verify proper casing is retrieved, when available, for each
dependency in the section.
"""
# Casing for section.
changed_values = False
unknown_names = [k for k in section.keys() if k not in set(self.proper_names)]
# Replace each package with proper casing.
for dep in unknown_names:
try:
# Get new casing for package name.
new_casing = proper_case(dep)
except IOError:
# Unable to normalize package name.
continue
if new_casing != dep:
changed_values = True
self.register_proper_name(new_casing)
# Replace old value with new value.
old_value = section[dep]
section[new_casing] = old_value
del section[dep]
# Return whether or not values have been changed.
return changed_values
@cached_property
def finders(self):
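        """Return pythonfinder Finder instances for the virtualenv's scripts directory, with and without global search."""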
from .vendor.pythonfinder import Finder
scripts_dirname = "Scripts" if os.name == "nt" else "bin"
scripts_dir = os.path.join(self.virtualenv_location, scripts_dirname)
finders = [
Finder(path=scripts_dir, global_search=gs, system=False)
for gs in (False, True)
]
return finders
@property
def finder(self):
return next(iter(self.finders), None)
def which(self, search, as_path=True):
find = operator.methodcaller("which", search)
result = next(iter(filter(None, (find(finder) for finder in self.finders))), None)
if not result:
result = self._which(search)
else:
if as_path:
result = str(result.path)
return result
|
[] |
[] |
[
"VIRTUAL_ENV",
"PIPENV_PIPFILE",
"PIPENV_USE_SYSTEM"
] |
[]
|
["VIRTUAL_ENV", "PIPENV_PIPFILE", "PIPENV_USE_SYSTEM"]
|
python
| 3 | 0 | |
transfer.py
|
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
import data_loader
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls_rot_transfer', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log_transfer', help='Log dir [default: log_transfer]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--dataset', type=str, choices=['shapenet', 'modelnet', 'modelnet10'], default='modelnet', help='dataset to train on [default: modelnet]')
FLAGS = parser.parse_args()
os.environ['KMP_DUPLICATE_LIB_OK']='True'
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL_PATH = FLAGS.model_path
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 2048
if FLAGS.dataset == 'modelnet':
NUM_CLASSES = 40
elif FLAGS.dataset == 'shapenet':
NUM_CLASSES = 57
elif FLAGS.dataset == 'modelnet10':
NUM_CLASSES = 10
else:
raise NotImplementedError()
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
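# Whether the pretrained checkpoint was built with the input/feature transform
# nets is inferred from its file path.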
USE_INPUT_TRANS = False if 'input_trans_False' in MODEL_PATH else True
USE_FEATURE_TRANS = False if 'feature_trans_False' in MODEL_PATH else True
HOSTNAME = socket.gethostname()
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
is_training_base_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, is_training_base_pl, bn_decay=bn_decay, use_input_trans=USE_INPUT_TRANS, use_feature_trans=USE_FEATURE_TRANS, num_classes=NUM_CLASSES)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
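            # Transfer learning: only optimize variables under the "transfer/"
            # scope; the pretrained base network stays frozen.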
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "transfer/")
print("[INFO] Trainable variables:", train_vars)
train_op = optimizer.minimize(loss, global_step=batch, var_list=train_vars)
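            # Build a saver that restores only the pretrained (non-"transfer") variables from the base checkpoint.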
variables = slim.get_variables_to_restore()
variables_to_restore = [v for v in variables if 'transfer' not in v.name]
saver_base = tf.train.Saver(variables_to_restore)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
# To fix the bug introduced in TF 0.12.1 as in
# http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
#sess.run(init)
# Restore weights
saver_base.restore(sess, MODEL_PATH)
print("Restored previous weights")
sess.run(init, {is_training_pl: True, is_training_base_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'is_training_base_pl': is_training_base_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
X_train, X_test, y_train, y_test = data_loader.get_pointcloud(dataset=FLAGS.dataset,
NUM_POINT=NUM_POINT)
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(X_train, y_train, sess, ops, train_writer)
eval_one_epoch(X_test, y_test, sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(current_data, current_label, sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
is_training_base = True
# Shuffle train files
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
# Augment batched point clouds by rotation and jittering
rotated_data, _ = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
jittered_data = provider.jitter_point_cloud(rotated_data)
feed_dict = {ops['pointclouds_pl']: jittered_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,
ops['is_training_base_pl']: is_training_base,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(current_data, current_label, sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
is_training_base = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,
ops['is_training_base_pl']: is_training_base,}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
if __name__ == "__main__":
train()
LOG_FOUT.close()
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
controllers/plutus.go
|
package controllers
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"math"
"math/big"
"net/http"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/eabz/btcutil"
"github.com/eabz/btcutil/chaincfg"
"github.com/eabz/btcutil/hdkeychain"
"github.com/eabz/btcutil/txscript"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/grupokindynos/common/blockbook"
coinfactory "github.com/grupokindynos/common/coin-factory"
"github.com/grupokindynos/common/coin-factory/coins"
"github.com/grupokindynos/common/plutus"
"github.com/grupokindynos/plutus/models"
"github.com/martinboehm/btcd/btcec"
"github.com/martinboehm/btcd/chaincfg/chainhash"
"github.com/martinboehm/btcd/wire"
hdwallet "github.com/miguelmota/go-ethereum-hdwallet"
"github.com/tyler-smith/go-bip39"
"golang.org/x/crypto/sha3"
)
type Params struct {
Coin string
Body []byte
Txid string
}
var ethWallet *hdwallet.Wallet
var myClient = &http.Client{Timeout: 10 * time.Second}
const addrGap = 20
type AddrInfo struct {
LastUsed int
AddrInfo []models.AddrInfo
}
type Controller struct {
Address map[string]AddrInfo
}
type GasStation struct {
Fast float64 `json:"fast"`
Fastest float64 `json:"fastest"`
SafeLow float64 `json:"safeLow"`
Average float64 `json:"average"`
SafeLowWait float64 `json:"safeLowWait"`
AvgWait float64 `json:"avgWait"`
FastWait float64 `json:"fastWait"`
FastestWait float64 `json:"fastestWait"`
}
func (c *Controller) GetBalance(params Params) (interface{}, error) {
coinConfig, err := coinfactory.GetCoin(params.Coin)
if err != nil {
return nil, err
}
if !coinConfig.Info.Token && coinConfig.Info.Tag != "ETH" {
blockBookWrap := blockbook.NewBlockBookWrapper(coinConfig.Info.Blockbook)
acc, err := getAccFromMnemonic(coinConfig, false)
if err != nil {
return nil, err
}
pub, err := acc.Neuter()
if err != nil {
return nil, err
}
info, err := blockBookWrap.GetXpub(pub.String())
if err != nil {
return nil, err
}
confirmed, err := strconv.ParseFloat(info.Balance, 64)
if err != nil {
return nil, err
}
unconfirmed, err := strconv.ParseFloat(info.UnconfirmedBalance, 64)
if err != nil {
return nil, err
}
response := plutus.Balance{
Confirmed: confirmed / 1e8,
Unconfirmed: unconfirmed / 1e8,
}
return response, nil
} else {
ethConfig, err := coinfactory.GetCoin("ETH")
if err != nil {
return nil, err
}
acc, err := getEthAccFromMnemonic(ethConfig, false)
if err != nil {
return nil, err
}
blockBookWrap := blockbook.NewBlockBookWrapper(ethConfig.Info.Blockbook)
info, err := blockBookWrap.GetEthAddress(acc.Address.Hex())
if err != nil {
return nil, err
}
if coinConfig.Info.Tag != "ETH" {
tokenInfo := ercDetails(info, coinConfig.Info.Contract)
if tokenInfo == nil {
response := plutus.Balance{
Confirmed: 0,
}
return response, nil
}
balance, err := strconv.ParseFloat(tokenInfo.Balance, 64)
if err != nil {
return nil, err
}
balance = balance / (math.Pow(10, float64(tokenInfo.Decimals)))
response := plutus.Balance{
Confirmed: balance,
}
return response, nil
} else {
balance, err := strconv.ParseFloat(info.Balance, 64)
if err != nil {
return nil, err
}
response := plutus.Balance{
Confirmed: balance / 1e18,
}
return response, nil
}
}
}
func ercDetails(info blockbook.EthAddr, contract string) *blockbook.EthTokens {
var tokenInfo *blockbook.EthTokens
for _, token := range info.Tokens {
if common.HexToAddress(contract) == common.HexToAddress(token.Contract) {
tokenInfo = &token
break
}
}
return tokenInfo
}
func (c *Controller) GetAddress(params Params) (interface{}, error) {
coinConfig, err := coinfactory.GetCoin(params.Coin)
if err != nil {
return nil, err
}
if coinConfig.Info.Token || coinConfig.Info.Tag == "ETH" {
var acc accounts.Account
if coinConfig.Mnemonic == "" {
ethConfig, err := coinfactory.GetCoin("ETH")
if err != nil {
return nil, err
}
acc, err = getEthAccFromMnemonic(ethConfig, false)
if err != nil {
return nil, err
}
} else {
acc, err = getEthAccFromMnemonic(coinConfig, false)
if err != nil {
return nil, err
}
}
return acc.Address.Hex(), nil
}
acc, err := getAccFromMnemonic(coinConfig, false)
if err != nil {
return nil, err
}
// Create a new xpub and derive the address from the hdwallet
directExtended, err := acc.Child(0)
if err != nil {
return nil, err
}
addrExtPub, err := directExtended.Child(uint32(c.Address[coinConfig.Info.Tag].LastUsed + 1))
if err != nil {
return nil, err
}
addr, err := addrExtPub.Address(coinConfig.NetParams)
if err != nil {
return nil, err
}
newAddrInfo := AddrInfo{
LastUsed: c.Address[coinConfig.Info.Tag].LastUsed + 1,
AddrInfo: c.Address[coinConfig.Info.Tag].AddrInfo,
}
newAddrInfo.AddrInfo = append(newAddrInfo.AddrInfo, models.AddrInfo{
Addr: addr.String(), Path: c.Address[coinConfig.Info.Tag].LastUsed + 1,
})
c.Address[coinConfig.Info.Tag] = newAddrInfo
return addr.String(), nil
}
func (c *Controller) SendToAddress(params Params) (interface{}, error) {
var SendToAddressData plutus.SendAddressBodyReq
err := json.Unmarshal(params.Body, &SendToAddressData)
if err != nil {
return nil, err
}
coinConfig, err := coinfactory.GetCoin(SendToAddressData.Coin)
if err != nil {
return "", err
}
var txid string
if coinConfig.Info.Token || coinConfig.Info.Tag == "ETH" {
txid, err = c.sendToAddressEth(SendToAddressData, coinConfig)
if err != nil {
return nil, err
}
} else {
txid, err = c.sendToAddress(SendToAddressData, coinConfig)
if err != nil {
return nil, err
}
}
return txid, nil
}
func (c *Controller) sendToAddress(SendToAddressData plutus.SendAddressBodyReq, coinConfig *coins.Coin) (string, error) {
value, err := btcutil.NewAmount(SendToAddressData.Amount)
if err != nil {
return "", err
}
acc, err := getAccFromMnemonic(coinConfig, true)
if err != nil {
return "", err
}
accPub, err := acc.Neuter()
if err != nil {
return "", err
}
blockBookWrap := blockbook.NewBlockBookWrapper(coinConfig.Info.Blockbook)
utxos, err := blockBookWrap.GetUtxo(accPub.String(), false)
if err != nil {
return "", err
}
if len(utxos) == 0 {
return "", errors.New("no balance available")
}
var Tx wire.MsgTx
var txVersion int32
if coinConfig.Info.Tag == "POLIS" || coinConfig.Info.Tag == "DASH" || coinConfig.Info.Tag == "GRS" {
txVersion = 2
} else {
txVersion = 1
}
var availableAmount btcutil.Amount
var changeAddrPubKeyHash string
// Add the inputs without signatures
for i, utxo := range utxos {
if i == 0 {
changeAddrPubKeyHash = utxo.Address
}
intValue, err := strconv.ParseInt(utxo.Value, 10, 64)
if err != nil {
return "", err
}
utxoAmount := btcutil.Amount(intValue)
availableAmount += utxoAmount
txidHash, err := chainhash.NewHashFromStr(utxo.Txid)
if err != nil {
return "", err
}
prevOut := wire.NewOutPoint(txidHash, uint32(utxo.Vout))
in := wire.NewTxIn(prevOut, nil, nil)
Tx.AddTxIn(in)
}
// To prevent address collision we need to de-register all networks and register just the network in use
chaincfg.ResetParams()
chaincfg.Register(coinConfig.NetParams)
// Retrieve information for outputs
payAddr, err := btcutil.DecodeAddress(SendToAddressData.Address, coinConfig.NetParams)
if err != nil {
return "", err
}
changeAddr, err := btcutil.DecodeAddress(changeAddrPubKeyHash, coinConfig.NetParams)
if err != nil {
return "", err
}
pkScriptPay, err := txscript.PayToAddrScript(payAddr)
if err != nil {
return "", err
}
pkScriptChange, err := txscript.PayToAddrScript(changeAddr)
if err != nil {
return "", err
}
txOut := &wire.TxOut{
Value: int64(value.ToUnit(btcutil.AmountSatoshi)),
PkScript: pkScriptPay,
}
var fee blockbook.Fee
if SendToAddressData.Coin == "BTC" {
fee, err = blockBookWrap.GetFee("4")
if err != nil {
return "", err
}
} else {
fee, err = blockBookWrap.GetFee("2")
if err != nil {
return "", err
}
}
var feeRate int64
if fee.Result == "-1" || fee.Result == "0" || fee.Result == "" {
feeRate = 4000
} else {
feeParse, err := strconv.ParseFloat(fee.Result, 64)
if err != nil {
return "", err
}
feeRate = int64(feeParse * 1e8)
}
txSize := (len(Tx.TxIn) * 180) + (len(Tx.TxOut) * 34) + 124
feeSats := float64(feeRate) / 1024.0 * float64(txSize)
payingFee := btcutil.Amount(int64(feeSats))
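// Rough worked example of the size-based fee estimate above (illustrative numbers, not from the
// source): with 2 inputs and 2 outputs the estimated size is 2*180 + 2*34 + 124 = 552 bytes, so at
// the fallback fee rate of 4000 sats/kB the paid fee is 4000/1024 * 552 ≈ 2156 satoshis.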
if availableAmount-payingFee-value > 0 {
txOutChange := &wire.TxOut{
Value: int64(((availableAmount - value) - payingFee).ToUnit(btcutil.AmountSatoshi)),
PkScript: pkScriptChange,
}
Tx.AddTxOut(txOutChange)
} else {
txOut.Value = int64((value - payingFee).ToUnit(btcutil.AmountSatoshi))
}
Tx.AddTxOut(txOut)
Tx.Version = txVersion
// To prevent address collision we need to de-register all networks and register just the network in use
chaincfg.ResetParams()
chaincfg.Register(coinConfig.NetParams)
// Create the signatures
for i, utxo := range utxos {
path := strings.Split(utxo.Path, "/")
pathParse, err := strconv.ParseInt(path[5], 10, 64)
if err != nil {
return "", err
}
privKey, err := getPrivKeyFromPath(acc, uint32(pathParse))
if err != nil {
return "", err
}
addr, err := btcutil.DecodeAddress(utxo.Address, coinConfig.NetParams)
if err != nil {
return "", err
}
subscript, err := txscript.PayToAddrScript(addr)
if err != nil {
return "", err
}
var sigHash txscript.SigHashHasher
if coinConfig.Info.Tag == "GRS" {
sigHash = txscript.Sha256
} else {
sigHash = txscript.Sha256d
}
sigScript, err := txscript.SignatureScript(&Tx, i, subscript, txscript.SigHashAll, privKey, true, sigHash)
if err != nil {
return "", err
}
Tx.TxIn[i].SignatureScript = sigScript
}
buf := bytes.NewBuffer([]byte{})
err = Tx.BtcEncode(buf, 0, wire.BaseEncoding)
if err != nil {
return "", err
}
rawTx := hex.EncodeToString(buf.Bytes())
return blockBookWrap.SendTx(rawTx)
}
func (c *Controller) sendToAddressEth(SendToAddressData plutus.SendAddressBodyReq, coinConfig *coins.Coin) (string, error) {
// using the ethereum account to hold the tokens
ethConfig, err := coinfactory.GetCoin("ETH")
if err != nil {
return "", err
}
//**get the account that holds the private keys and addresses
account, err := getEthAccFromMnemonic(ethConfig, true)
if err != nil {
return "", err
}
ethAccount := account.Address.Hex()
blockBookWrap := blockbook.NewBlockBookWrapper(ethConfig.Info.Blockbook)
//** get the balance, check that it is > 0 and not less than the amount
info, err := blockBookWrap.GetEthAddress(ethAccount)
if err != nil {
return "", err
}
balance, err := strconv.ParseFloat(info.Balance, 64)
if err != nil {
return "", err
}
balance = balance / 1e18
if balance == 0 {
return "", errors.New("no eth available")
}
decimals := 18
if coinConfig.Info.Token && coinConfig.Info.Tag != "ETH" {
// check balance of token
tokenInfo := ercDetails(info, coinConfig.Info.Contract)
if tokenInfo == nil {
return "", errors.New("no token balance available")
}
tokenBalance, err := strconv.ParseFloat(tokenInfo.Balance, 64)
if err != nil {
return "", err
}
tokenBalance = tokenBalance / (math.Pow(10, float64(tokenInfo.Decimals)))
if tokenBalance == 0 || tokenBalance < SendToAddressData.Amount {
return "", errors.New("not enough token available")
}
decimals = tokenInfo.Decimals
} else {
if balance < SendToAddressData.Amount {
return "", errors.New("not enough balance")
}
}
// get the nonce
nonce, err := strconv.ParseUint(info.Nonce, 0, 64)
if err != nil {
return "", errors.New("nonce failed")
}
nonce += uint64(info.UnconfirmedTxs)
//** Retrieve information for outputs: out address
toAddress := common.HexToAddress(SendToAddressData.Address)
//**calculate fee/gas cost
gasLimit := uint64(21000)
if coinConfig.Info.Tag != "ETH" {
gasLimit = uint64(200000)
}
gasStation := GasStation{}
err = getJSON("https://ethgasstation.info/json/ethgasAPI.json", &gasStation)
if err != nil {
return "", errors.New("could not retrieve the gas price")
}
gasPrice := big.NewInt(int64(1000000000 * (gasStation.Average / 10))) //(10^9*(gweiValue/10))
var data []byte
var tx *types.Transaction
if coinConfig.Info.Token && coinConfig.Info.Tag != "ETH" {
// the additional data for the token transaction
tokenAddress := common.HexToAddress(coinConfig.Info.Contract)
transferFnSignature := []byte("transfer(address,uint256)")
hash := sha3.NewLegacyKeccak256()
hash.Write(transferFnSignature)
methodID := hash.Sum(nil)[:4]
paddedAddress := common.LeftPadBytes(toAddress.Bytes(), 32)
val := new(big.Float)
pot := new(big.Float)
val.SetFloat64(SendToAddressData.Amount)
pot.SetFloat64(math.Pow10(decimals))
val.Mul(val, pot)
amount, _ := val.Int(nil)
paddedAmount := common.LeftPadBytes(amount.Bytes(), 32)
data = append(data, methodID...)
data = append(data, paddedAddress...)
data = append(data, paddedAmount...)
value := big.NewInt(0) // in wei (0 eth)
tx = types.NewTransaction(nonce, tokenAddress, value, gasLimit, gasPrice, data)
} else {
value := big.NewInt(int64(SendToAddressData.Amount * 1000000000000000000)) // the amount in wei
tx = types.NewTransaction(nonce, toAddress, value, gasLimit, gasPrice, data)
}
// **sign and send
signedTx, err := signEthTx(ethConfig, account, tx, nil)
if err != nil {
return "", errors.New("failed to sign transaction")
}
ts := types.Transactions{signedTx}
rawTxBytes := ts.GetRlp(0)
rawTxHex := hex.EncodeToString(rawTxBytes)
return blockBookWrap.SendTx("0x" + rawTxHex)
//return "", nil
}
func getJSON(url string, target interface{}) error {
r, err := myClient.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(target)
}
func (c *Controller) ValidateAddress(params Params) (interface{}, error) {
var ValidateAddressData models.AddressValidationBodyReq
err := json.Unmarshal(params.Body, &ValidateAddressData)
if err != nil {
return nil, err
}
coinConfig, err := coinfactory.GetCoin(ValidateAddressData.Coin)
if err != nil {
return nil, err
}
if coinConfig.Info.Token || coinConfig.Info.Tag == "ETH" {
coinConfig, err = coinfactory.GetCoin("ETH")
if err != nil {
return nil, err
}
acc, err := getEthAccFromMnemonic(coinConfig, false)
if err != nil {
return nil, err
}
return reflect.DeepEqual(ValidateAddressData.Address, acc.Address.Hex()), nil
}
var isMine bool
for _, addr := range c.Address[coinConfig.Info.Tag].AddrInfo {
if addr.Addr == ValidateAddressData.Address {
isMine = true
}
}
return isMine, nil
}
func (c *Controller) ValidateRawTx(params Params) (interface{}, error) {
var ValidateTxData plutus.ValidateRawTxReq
err := json.Unmarshal(params.Body, &ValidateTxData)
if err != nil {
return nil, err
}
coinConfig, err := coinfactory.GetCoin(ValidateTxData.Coin)
if err != nil {
return nil, err
}
var isValue, isAddress bool
//ethereum-like coins (and ERC20)
if coinConfig.Info.Token || coinConfig.Info.Tag == "ETH" {
value := ValidateTxData.Amount
var tx *types.Transaction
if ValidateTxData.RawTx[0:2] == "0x" {
ValidateTxData.RawTx = ValidateTxData.RawTx[2:]
}
rawtx, err := hex.DecodeString(ValidateTxData.RawTx)
if err != nil {
return nil, err
}
err = rlp.DecodeBytes(rawtx, &tx)
if err != nil {
return nil, err
}
//compare amount from the tx and the input body
var txBodyAmount int64
var txAddr common.Address
if coinConfig.Info.Token && coinConfig.Info.Tag != "ETH" {
address, amount := DecodeERC20Data([]byte(hex.EncodeToString(tx.Data())))
txAddr = common.HexToAddress(string(address))
txBodyAmount = amount.Int64()
} else {
txBodyAmount = tx.Value().Int64()
txAddr = *tx.To()
}
if txBodyAmount == value {
isValue = true
}
bodyAddr := common.HexToAddress(ValidateTxData.Address)
//compare the address from the tx and the input body
if bytes.Equal(bodyAddr.Bytes(), txAddr.Bytes()) {
isAddress = true
}
} else {
//bitcoin-like coins
value := btcutil.Amount(ValidateTxData.Amount)
rawTxBytes, err := hex.DecodeString(ValidateTxData.RawTx)
if err != nil {
return nil, err
}
tx, err := btcutil.NewTxFromBytes(rawTxBytes)
if err != nil {
return nil, err
}
// To prevent address collision we need to de-register all networks and register just the network in use
chaincfg.ResetParams()
chaincfg.Register(coinConfig.NetParams)
for _, out := range tx.MsgTx().TxOut {
outAmount := btcutil.Amount(out.Value)
if outAmount == value {
isValue = true
}
for _, addr := range c.Address[coinConfig.Info.Tag].AddrInfo {
Addr, err := btcutil.DecodeAddress(addr.Addr, coinConfig.NetParams)
if err != nil {
return nil, err
}
scriptAddr, err := txscript.PayToAddrScript(Addr)
if err != nil {
return nil, err
}
if bytes.Equal(scriptAddr, out.PkScript) {
isAddress = true
}
}
}
}
if isValue && isAddress {
return true, nil
} else {
return false, nil
}
}
func (c *Controller) getAddrs(coinConfig *coins.Coin) error {
acc, err := getAccFromMnemonic(coinConfig, false)
if err != nil {
return err
}
blockBookWrap := blockbook.NewBlockBookWrapper(coinConfig.Info.Blockbook)
info, err := blockBookWrap.GetXpub(acc.String())
if err != nil {
return err
}
var addrInfoSlice []models.AddrInfo
for i := info.UsedTokens; i < info.UsedTokens+addrGap; i++ {
addr, err := getPubKeyHashFromPath(acc, coinConfig, uint32(i))
if err != nil {
return err
}
addrInfo := models.AddrInfo{Addr: addr, Path: i}
addrInfoSlice = append(addrInfoSlice, addrInfo)
}
c.Address[coinConfig.Info.Tag] = AddrInfo{
LastUsed: info.UsedTokens,
AddrInfo: addrInfoSlice,
}
return nil
}
func getAccFromMnemonic(coinConfig *coins.Coin, priv bool) (*hdkeychain.ExtendedKey, error) {
chaincfg.ResetParams()
_ = chaincfg.Register(coinConfig.NetParams)
if coinConfig.Mnemonic == "" {
return nil, errors.New("the coin is not available")
}
seed := bip39.NewSeed(coinConfig.Mnemonic, os.Getenv("MNEMONIC_PASSWORD"))
mKey, err := hdkeychain.NewMaster(seed, coinConfig.NetParams)
if err != nil {
return nil, err
}
purposeChild, err := mKey.Child(hdkeychain.HardenedKeyStart + 44)
if err != nil {
return nil, err
}
coinType, err := purposeChild.Child(hdkeychain.HardenedKeyStart + coinConfig.NetParams.HDCoinType)
if err != nil {
return nil, err
}
accChild, err := coinType.Child(hdkeychain.HardenedKeyStart + 0)
if err != nil {
return nil, err
}
if priv {
return accChild, nil
}
return accChild.Neuter()
}
func getEthAccFromMnemonic(coinConfig *coins.Coin, saveWallet bool) (accounts.Account, error) {
if coinConfig.Mnemonic == "" {
return accounts.Account{}, errors.New("the coin is not available")
}
wallet, err := hdwallet.NewFromMnemonic(coinConfig.Mnemonic)
if err != nil {
return accounts.Account{}, err
}
// standard for eth wallets like Metamask
path := hdwallet.MustParseDerivationPath("m/44'/60'/0'/0/0")
account, err := wallet.Derive(path, true)
if err != nil {
return accounts.Account{}, err
}
if saveWallet {
ethWallet = wallet
}
return account, nil
}
func signEthTx(coinConfig *coins.Coin, account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
if coinConfig.Mnemonic == "" {
return nil, errors.New("the coin is not available")
}
signedTx, err := ethWallet.SignTx(account, tx, chainID)
if err != nil {
return nil, err
}
return signedTx, nil
}
func DecodeERC20Data(b []byte) ([]byte, *big.Int) {
to := b[32:72]
tokens := b[74:136]
hexed, _ := hex.DecodeString(string(tokens))
amount := big.NewInt(0)
amount.SetBytes(hexed)
return to, amount
}
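// Layout note (editorial): DecodeERC20Data expects hex-encoded calldata of a standard ERC-20
// transfer(address,uint256): 8 hex chars of method ID, a 32-byte left-padded address whose
// meaningful 20 bytes sit at hex offsets 32-71, then a 32-byte left-padded amount at 72-135
// (the slice starting at 74 drops the amount's highest byte, which is zero for realistic values).
// Example, mirroring how ValidateRawTx calls it:
//   to, amount := DecodeERC20Data([]byte(hex.EncodeToString(tx.Data())))
//   // to holds the 40 hex chars of the recipient, amount the token value in base units.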
func decimalToToken(decimalAmount float64, decimals int) *big.Int {
val := new(big.Float)
pot := new(big.Float)
val.SetFloat64(decimalAmount)
pot.SetFloat64(math.Pow10(decimals))
val.Mul(val, pot)
amount, _ := val.Int(nil)
return amount
}
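// Example (illustrative): decimalToToken(1.5, 18) scales 1.5 by 10^18 and returns the big.Int
// 1500000000000000000, i.e. the amount expressed in the token's base units.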
func getPubKeyHashFromPath(acc *hdkeychain.ExtendedKey, coinConfig *coins.Coin, path uint32) (string, error) {
directExtended, err := acc.Child(0)
if err != nil {
return "", err
}
addrExtPub, err := directExtended.Child(path)
if err != nil {
return "", err
}
addr, err := addrExtPub.Address(coinConfig.NetParams)
if err != nil {
return "", err
}
return addr.String(), nil
}
func getPrivKeyFromPath(acc *hdkeychain.ExtendedKey, path uint32) (*btcec.PrivateKey, error) {
directExtended, err := acc.Child(0)
if err != nil {
return nil, err
}
accPath, err := directExtended.Child(path)
if err != nil {
return nil, err
}
return accPath.ECPrivKey()
}
func NewPlutusController() *Controller {
ctrl := &Controller{
Address: make(map[string]AddrInfo),
}
// Here we handle only active coins
for tag := range coinfactory.Coins {
coin, err := coinfactory.GetCoin(tag)
if err != nil {
panic(err)
}
if !coin.Info.Token && coin.Info.Tag != "ETH" && coin.Info.Tag != "XSG" && coin.Info.Tag != "DAPS" {
err := ctrl.getAddrs(coin)
if err != nil {
panic(errors.New(err.Error() + " " + coin.Info.Tag))
}
}
}
return ctrl
}
|
[
"\"MNEMONIC_PASSWORD\""
] |
[] |
[
"MNEMONIC_PASSWORD"
] |
[]
|
["MNEMONIC_PASSWORD"]
|
go
| 1 | 0 | |
fcmaes/advretry.py
|
# Copyright (c) Dietmar Wolz.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
import time
import os
import math
import random
import ctypes as ct
import numpy as np
from random import Random
import multiprocessing as mp
from multiprocessing import Process
from numpy.random import Generator, MT19937, SeedSequence
from scipy.optimize import OptimizeResult, Bounds
from fcmaes.retry import _convertBounds
from fcmaes.optimizer import Optimizer, dtime, fitting, seed_random
os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
def minimize(fun,
bounds,
value_limit = math.inf,
num_retries = 5000,
logger = None,
workers = mp.cpu_count(),
popsize = 31,
min_evaluations = 2000,
max_evaluations = 50000,
evals_step_size = 1000,
check_interval = 100,
useCpp = False,
stop_fittness = None,
):
"""Minimization of a scalar function of one or more variables using
coordinated parallel CMA-ES retry.
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
bounds : sequence or `Bounds`, optional
Bounds on variables. There are two ways to specify the bounds:
1. Instance of the `scipy.Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
value_limit : float, optional
Upper limit for CMA-ES optimized function values to be stored.
This limit needs to be carefully set to a value which is seldom
found by CMA-ES retry to keep the store free of bad runs.
The crossover offspring of bad parents can
cause the algorithm to get stuck at local minima.
num_retries : int, optional
Number of CMA-ES retries.
logger : logger, optional
logger for log output of the retry mechanism. If None, logging
is switched off. Default is a logger which logs both to stdout and
appends to a file ``optimizer.log``.
workers : int, optional
number of parallel processes used. Default is mp.cpu_count()
popsize : int, optional
CMA-ES population size used for all CMA-ES runs.
min_evaluations : int, optional
Initial limit of the number of function evaluations.
max_evaluations : int, optional
Final limit of the number of function evaluations.
evals_step_size : int, optional
Amount by which the limit of the number of function evaluations is incremented after
``check_interval`` runs
check_interval : int, optional
After ``check_interval`` runs the store is sorted and the evaluation limit
is incremented by ``evals_step_size``
useCpp : bool, optional
Flag indicating use of the C++ CMA-ES implementation. Default is `False`, which
uses the Python CMA-ES implementation
stop_fittness : float, optional
Limit for fitness value. CMA-ES runs terminate if this value is reached.
Returns
-------
res : scipy.OptimizeResult
The optimization result is represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array,
``fun`` the best function value, ``nfev`` the number of function evaluations,
``nit`` the number of CMA-ES iterations, ``status`` the stopping criteria and
``success`` a Boolean flag indicating if the optimizer exited successfully. """
store = Store(bounds, min_evaluations = min_evaluations, max_evaluations = max_evaluations,
evals_step_size = evals_step_size, check_interval = check_interval, logger = logger)
optimizer = Optimizer(store, popsize, stop_fittness)
optimize = optimizer.cma_cpp if useCpp else optimizer.cma_python
return retry(fun, store, optimize, num_retries, value_limit, workers)
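# Minimal usage sketch for minimize() (hypothetical objective and bounds, not part of this module):
#
#   import numpy as np
#   from scipy.optimize import Bounds
#   from fcmaes import advretry
#
#   def rosen(x):
#       x = np.asarray(x)
#       return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)
#
#   ret = advretry.minimize(rosen, Bounds([-5]*13, [5]*13), num_retries=320)
#   print(ret.x, ret.fun, ret.nfev)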
def retry(fun, store, optimize, num_retries, value_limit = math.inf, workers=mp.cpu_count()):
sg = SeedSequence()
rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]
proc=[Process(target=_retry_loop,
args=(pid, rgs, fun, store, optimize, num_retries, value_limit)) for pid in range(workers)]
[p.start() for p in proc]
[p.join() for p in proc]
store.sort()
store.dump()
return OptimizeResult(x=store.get_x(0), fun=store.get_y(0), nfev=store.get_count_evals(), success=True)
class Store(object):
"""thread safe storage for optimization retry results;
delivers boundary and initial step size vectors for advanced retry crossover operation."""
def __init__(self,
bounds, # bounds of the objective function arguments
min_evaluations = 2000, # start with this number of evaluations
max_evaluations = 50000, # maximal number of evaluations
evals_step_size = 1000, # increase evaluation number by eval_step_size after sorting
check_interval = 100, # sort evaluation store after check_interval iterations
capacity = 500, # capacity of the evaluation store
logger = None # if None logging is switched off
):
self.lower, self.upper = _convertBounds(bounds)
self.logger = logger
self.delta = self.upper - self.lower
self.capacity = capacity
self.max_evaluations = max_evaluations
self.evals_step_size = evals_step_size
self.check_interval = check_interval
self.dim = len(self.lower)
self.random = Random()
self.t0 = time.perf_counter()
#shared between processes
self.add_mutex = mp.Lock()
self.check_mutex = mp.Lock()
self.xs = mp.RawArray(ct.c_double, capacity * self.dim)
self.lowers = mp.RawArray(ct.c_double, capacity * self.dim)
self.uppers = mp.RawArray(ct.c_double, capacity * self.dim)
self.ys = mp.RawArray(ct.c_double, capacity)
self.max_evals = mp.RawValue(ct.c_int, min_evaluations)
self.count_evals = mp.RawValue(ct.c_long, 0)
self.count_runs = mp.RawValue(ct.c_int, 0)
self.num_stored = mp.RawValue(ct.c_int, 0)
self.num_sorted = mp.RawValue(ct.c_int, 0)
self.best_y = mp.RawValue(ct.c_double, math.inf)
self.worst_y = mp.RawValue(ct.c_double, math.inf)
def eval_num(self):
return self.max_evals.value
def limits(self):
"""guess, boundaries and initial step size for crossover operation."""
diff_fac = self.random.uniform(0.5, 1.0)
lim_fac = self.random.uniform(2.0, 4.0) * diff_fac
with self.add_mutex:
i, j = self.crossover()
if i < 0:
return math.inf, None, None, None, None
x0 = np.asarray(self.get_x(i))
x1 = np.asarray(self.get_x(j))
y0 = np.asarray(self.get_y(i))
deltax = np.abs(x1 - x0)
delta_bound = np.maximum(0.0001, lim_fac * deltax)
lower = np.maximum(self.lower, x0 - delta_bound)
upper = np.minimum(self.upper, x0 + delta_bound)
sdev = np.maximum(0.001, np.minimum(0.5, diff_fac * deltax / self.delta))
return y0, x1, lower, upper, sdev
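# In words (editorial note): limits() picks two stored solutions x0 (the better one) and x1,
# shrinks the search box to x0 +/- lim_fac*|x1-x0| clipped to the original bounds, and proposes
# x1 as the start guess with step sizes proportional to diff_fac*|x1-x0| relative to the full
# range. The child run therefore explores the region between two good parents, which is the
# crossover idea behind this coordinated retry.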
def distance(self, yprev, y, xprev, x):
"""mean quadratic X difference to neighbor entry."""
diff = self.worst_y.value - self.best_y.value
if diff != 0 and (y - yprev) / diff > 0.01: #enough y distance: accept
return 1
dx = (np.asarray(x) - np.asarray(xprev)) / self.delta
return math.sqrt(sum(dx*dx)/self.dim)
def replace(self, i, y, xs, lower, upper):
"""replace entry in store."""
self.set_y(i, y)
self.set_x(i, xs)
self.set_lower(i, lower)
self.set_upper(i, upper)
def crossover(self): # Choose two good entries for recombination
"""indices of store entries to be used for crossover operation."""
n = self.num_sorted.value
if n < 2:
return -1, -1
lim = self.random.uniform(min(0.1*n, 1.5), 0.2*n)/n
for c in range(100):
i1 = -1
i2 = -1
for j in range(n):
if self.random.random() < lim:
if i1 < 0:
i1 = j
else:
i2 = j
return i1, i2
return -1, -1
def sort(self):
"""sorts all store entries, keep only the 90% best to make room for new ones;
skip entries having similar x values than their neighbors to preserve diversity"""
ns = self.num_stored.value
ys = np.asarray(self.ys[:ns])
yi = ys.argsort()
sortRuns = []
yprev = None
xprev = None
for i in range(len(yi)):
y = ys[yi[i]]
x = self.get_x(yi[i])
if i == 0 or self.distance(yprev, y, xprev, x) > 0.15:
sortRuns.append((y, x, self.get_lower(yi[i]), self.get_upper(yi[i])))
yprev = y
xprev = x
numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best
for i in range(numStored):
self.replace(i, sortRuns[i][0], sortRuns[i][1], sortRuns[i][2], sortRuns[i][3])
self.num_sorted.value = numStored
self.num_stored.value = numStored
self.best_y.value = self.get_y(0)
self.worst_y.value = self.get_y(numStored-1)
return numStored
def add_result(self, y, xs, lower, upper, evals, limit=math.inf):
"""registers an optimization result at the store."""
with self.add_mutex:
self.incr_count_evals(evals)
if y < limit:
if y < self.best_y.value:
self.best_y.value = y
self.dump()
if self.num_stored.value >= self.capacity - 1:
self.sort()
ns = self.num_stored.value
self.num_stored.value = ns + 1
self.replace(ns, y, xs, lower, upper)
def get_x(self, pid):
return self.xs[pid*self.dim:(pid+1)*self.dim]
def get_y(self, pid):
return self.ys[pid]
def get_ys(self):
return self.ys[:self.num_stored.value]
def get_lower(self, pid):
return self.lowers[pid*self.dim:(pid+1)*self.dim]
def get_upper(self, pid):
return self.uppers[pid*self.dim:(pid+1)*self.dim]
def get_count_evals(self):
return self.count_evals.value
def get_count_runs(self):
return self.count_runs.value
def set_x(self, pid, xs):
self.xs[pid*self.dim:(pid+1)*self.dim] = xs[:]
def set_y(self, pid, y):
self.ys[pid] = y
def set_lower(self, pid, lower):
self.lowers[pid*self.dim:(pid+1)*self.dim] = lower[:]
def set_upper(self, pid, upper):
self.uppers[pid*self.dim:(pid+1)*self.dim] = upper[:]
def incr_count_evals(self, evals):
"""registers the number of evaluations of an optimization run;
trigger sorting after check_interval calls. """
self.count_runs.value += 1
if self.count_runs.value % self.check_interval == self.check_interval-1:
if self.max_evals.value < self.max_evaluations:
self.max_evals.value += self.evals_step_size
self.sort()
self.count_evals.value += evals
def dump(self):
"""logs the current status of the store if logger defined."""
if self.logger is None:
return
Ys = self.get_ys()
vals = []
for i in range(min(20, len(Ys))):
vals.append(round(Ys[i],2))
dt = dtime(self.t0)
message = '{0} {1} {2} {3} {4:.6f} {5:.2f} {6} {7!s} {8!s}'.format(
dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value,
self.best_y.value, self.worst_y.value, self.num_stored.value, vals, self.get_x(0))
self.logger.info(message)
def _retry_loop(pid, rgs, fun, store, optimize, num_retries, value_limit):
seed_random() # make sure cpp random generator for this process is initialized properly
while store.get_count_runs() < num_retries:
if _crossover(fun, store, optimize, rgs[pid]):
continue
try:
dim = len(store.lower)
sol, y, evals = optimize(fun, None, Bounds(store.lower, store.upper),
[random.uniform(0.05, 0.1)]*dim, rgs[pid])
store.add_result(y, sol, store.lower, store.upper, evals, value_limit)
except Exception as ex:
continue
def _crossover(fun, store, optimize, rg):
if random.random() < 0.5:
return False
y0, guess, lower, upper, sdev = store.limits()
if guess is None:
return False
guess = fitting(guess, lower, upper) # take X from lower
try:
sol, y, evals = optimize(fun, guess, Bounds(lower, upper), sdev, rg)
store.add_result(y, sol, lower, upper, evals, y0) # limit to y0
except:
return False
return True
|
[] |
[] |
[
"MKL_NUM_THREADS",
"OPENBLAS_NUM_THREADS",
"MKL_DEBUG_CPU_TYPE"
] |
[]
|
["MKL_NUM_THREADS", "OPENBLAS_NUM_THREADS", "MKL_DEBUG_CPU_TYPE"]
|
python
| 3 | 0 | |
core/runtime/src/main/java/io/quarkus/runtime/logging/LoggingSetupRecorder.java
|
package io.quarkus.runtime.logging;
import static org.wildfly.common.net.HostName.getQualifiedHostName;
import static org.wildfly.common.os.Process.getProcessName;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.logging.ErrorManager;
import java.util.logging.Formatter;
import java.util.logging.Handler;
import java.util.logging.Level;
import org.graalvm.nativeimage.ImageInfo;
import org.jboss.logmanager.EmbeddedConfigurator;
import org.jboss.logmanager.LogContext;
import org.jboss.logmanager.Logger;
import org.jboss.logmanager.errormanager.OnlyOnceErrorManager;
import org.jboss.logmanager.formatters.ColorPatternFormatter;
import org.jboss.logmanager.formatters.PatternFormatter;
import org.jboss.logmanager.handlers.AsyncHandler;
import org.jboss.logmanager.handlers.ConsoleHandler;
import org.jboss.logmanager.handlers.FileHandler;
import org.jboss.logmanager.handlers.PeriodicRotatingFileHandler;
import org.jboss.logmanager.handlers.PeriodicSizeRotatingFileHandler;
import org.jboss.logmanager.handlers.SizeRotatingFileHandler;
import org.jboss.logmanager.handlers.SyslogHandler;
import io.quarkus.runtime.RuntimeValue;
import io.quarkus.runtime.annotations.Recorder;
/**
*
*/
@Recorder
public class LoggingSetupRecorder {
private static final boolean IS_WINDOWS = System.getProperty("os.name").toLowerCase(Locale.ENGLISH).contains("win");
/**
* <a href="https://conemu.github.io">ConEmu</a> ANSI X3.64 support enabled,
* used by <a href="https://cmder.net/">cmder</a>
*/
private static final boolean IS_CON_EMU_ANSI = IS_WINDOWS && "ON".equals(System.getenv("ConEmuANSI"));
/**
* These tests are same as used in jansi
* Source: https://github.com/fusesource/jansi/commit/bb3d538315c44f799d34fd3426f6c91c8e8dfc55
*/
private static final boolean IS_CYGWIN = IS_WINDOWS
&& System.getenv("PWD") != null
&& System.getenv("PWD").startsWith("/")
&& !"cygwin".equals(System.getenv("TERM"));
private static final boolean IS_MINGW_XTERM = IS_WINDOWS
&& System.getenv("MSYSTEM") != null
&& System.getenv("MSYSTEM").startsWith("MINGW")
&& "xterm".equals(System.getenv("TERM"));
public LoggingSetupRecorder() {
}
public void initializeLogging(LogConfig config, final List<RuntimeValue<Optional<Handler>>> additionalHandlers,
final List<RuntimeValue<Optional<Formatter>>> possibleFormatters) {
final Map<String, CategoryConfig> categories = config.categories;
final LogContext logContext = LogContext.getLogContext();
final Logger rootLogger = logContext.getLogger("");
rootLogger.setLevel(config.level.orElse(Level.INFO));
ErrorManager errorManager = new OnlyOnceErrorManager();
final Map<String, CleanupFilterConfig> filters = config.filters;
List<LogCleanupFilterElement> filterElements = new ArrayList<>(filters.size());
for (Entry<String, CleanupFilterConfig> entry : filters.entrySet()) {
filterElements.add(new LogCleanupFilterElement(entry.getKey(), entry.getValue().ifStartsWith));
}
final ArrayList<Handler> handlers = new ArrayList<>(3 + additionalHandlers.size());
if (config.console.enable) {
final Handler consoleHandler = configureConsoleHandler(config.console, errorManager, filterElements,
possibleFormatters);
errorManager = consoleHandler.getErrorManager();
handlers.add(consoleHandler);
}
if (config.file.enable) {
handlers.add(configureFileHandler(config.file, errorManager, filterElements));
}
if (config.syslog.enable) {
final Handler syslogHandler = configureSyslogHandler(config.syslog, errorManager, filterElements);
if (syslogHandler != null) {
handlers.add(syslogHandler);
}
}
Map<String, Handler> namedHandlers = createNamedHandlers(config, possibleFormatters, errorManager, filterElements);
for (Map.Entry<String, CategoryConfig> entry : categories.entrySet()) {
final String name = entry.getKey();
final Logger categoryLogger = logContext.getLogger(name);
final CategoryConfig categoryConfig = entry.getValue();
if (!"inherit".equals(categoryConfig.level)) {
categoryLogger.setLevelName(categoryConfig.level);
}
categoryLogger.setUseParentHandlers(categoryConfig.useParentHandlers);
if (categoryConfig.handlers.isPresent()) {
addNamedHandlersToCategory(categoryConfig, namedHandlers, categoryLogger, errorManager);
}
}
for (RuntimeValue<Optional<Handler>> additionalHandler : additionalHandlers) {
final Optional<Handler> optional = additionalHandler.getValue();
if (optional.isPresent()) {
final Handler handler = optional.get();
handler.setErrorManager(errorManager);
handler.setFilter(new LogCleanupFilter(filterElements));
handlers.add(handler);
}
}
InitialConfigurator.DELAYED_HANDLER.setAutoFlush(false);
InitialConfigurator.DELAYED_HANDLER.setHandlers(handlers.toArray(EmbeddedConfigurator.NO_HANDLERS));
}
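// Illustrative application.properties wiring for the handlers configured above (property names
// are assumed from the Quarkus logging config model; verify against the Quarkus logging guide):
//   quarkus.log.level=INFO
//   quarkus.log.console.format=%d{HH:mm:ss} %-5p [%c{2.}] %s%e%n
//   quarkus.log.file.enable=true
//   quarkus.log.file.path=target/quarkus.log
//   quarkus.log.category."org.hibernate".level=DEBUG
//   quarkus.log.handler.console."STRUCTURED".format=%e%n
//   quarkus.log.category."io.acme".handlers=STRUCTURED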
private static Map<String, Handler> createNamedHandlers(LogConfig config,
List<RuntimeValue<Optional<Formatter>>> possibleFormatters, ErrorManager errorManager,
List<LogCleanupFilterElement> filterElements) {
Map<String, Handler> namedHandlers = new HashMap<>();
for (Entry<String, ConsoleConfig> consoleConfigEntry : config.consoleHandlers.entrySet()) {
final Handler consoleHandler = configureConsoleHandler(consoleConfigEntry.getValue(), errorManager, filterElements,
possibleFormatters);
addToNamedHandlers(namedHandlers, consoleHandler, consoleConfigEntry.getKey());
}
for (Entry<String, FileConfig> fileConfigEntry : config.fileHandlers.entrySet()) {
final Handler fileHandler = configureFileHandler(fileConfigEntry.getValue(), errorManager, filterElements);
addToNamedHandlers(namedHandlers, fileHandler, fileConfigEntry.getKey());
}
for (Entry<String, SyslogConfig> sysLogConfigEntry : config.syslogHandlers.entrySet()) {
final Handler syslogHandler = configureSyslogHandler(sysLogConfigEntry.getValue(), errorManager, filterElements);
if (syslogHandler != null) {
addToNamedHandlers(namedHandlers, syslogHandler, sysLogConfigEntry.getKey());
}
}
return namedHandlers;
}
private static void addToNamedHandlers(Map<String, Handler> namedHandlers, Handler handler, String handlerName) {
if (namedHandlers.containsKey(handlerName)) {
throw new RuntimeException(String.format("Only one handler can be configured with the same name '%s'",
handlerName));
}
namedHandlers.put(handlerName, handler);
}
private void addNamedHandlersToCategory(CategoryConfig categoryConfig, Map<String, Handler> namedHandlers,
Logger categoryLogger,
ErrorManager errorManager) {
for (String categoryNamedHandler : categoryConfig.handlers.get()) {
if (namedHandlers.get(categoryNamedHandler) != null) {
categoryLogger.addHandler(namedHandlers.get(categoryNamedHandler));
} else {
errorManager.error(String.format("Handler with name '%s' is linked to a category but not configured.",
categoryNamedHandler), null, ErrorManager.GENERIC_FAILURE);
}
}
}
public void initializeLoggingForImageBuild() {
if (ImageInfo.inImageBuildtimeCode()) {
final ConsoleHandler handler = new ConsoleHandler(new PatternFormatter(
"%d{HH:mm:ss,SSS} %-5p [%c{1.}] %s%e%n"));
handler.setLevel(Level.INFO);
InitialConfigurator.DELAYED_HANDLER.setAutoFlush(false);
InitialConfigurator.DELAYED_HANDLER.setHandlers(new Handler[] { handler });
}
}
private static boolean hasColorSupport() {
if (IS_WINDOWS) {
// On Windows without a known good emulator
// TODO: optimally we would check if Win32 getConsoleMode has
// ENABLE_VIRTUAL_TERMINAL_PROCESSING enabled or enable it via
// setConsoleMode.
// For now we turn it off to not generate noisy output for most
// users.
// Must be on some Unix variant or ANSI-enabled windows terminal...
return IS_CON_EMU_ANSI || IS_CYGWIN || IS_MINGW_XTERM;
} else {
// on sane operating systems having a console is a good indicator
// you are attached to a TTY with colors.
return System.console() != null;
}
}
private static Handler configureConsoleHandler(final ConsoleConfig config, final ErrorManager defaultErrorManager,
final List<LogCleanupFilterElement> filterElements,
final List<RuntimeValue<Optional<Formatter>>> possibleFormatters) {
Formatter formatter = null;
boolean formatterWarning = false;
for (RuntimeValue<Optional<Formatter>> value : possibleFormatters) {
if (formatter != null) {
formatterWarning = true;
}
final Optional<Formatter> val = value.getValue();
if (val.isPresent()) {
formatter = val.get();
}
}
if (formatter == null) {
if (config.color.orElse(hasColorSupport())) {
formatter = new ColorPatternFormatter(config.darken, config.format);
} else {
formatter = new PatternFormatter(config.format);
}
}
final ConsoleHandler consoleHandler = new ConsoleHandler(formatter);
consoleHandler.setLevel(config.level);
consoleHandler.setErrorManager(defaultErrorManager);
consoleHandler.setFilter(new LogCleanupFilter(filterElements));
final Handler handler = config.async.enable ? createAsyncHandler(config.async, config.level, consoleHandler)
: consoleHandler;
if (formatterWarning) {
handler.getErrorManager().error("Multiple formatters were activated", null, ErrorManager.GENERIC_FAILURE);
}
return handler;
}
private static Handler configureFileHandler(final FileConfig config, final ErrorManager errorManager,
final List<LogCleanupFilterElement> filterElements) {
FileHandler handler = new FileHandler();
FileConfig.RotationConfig rotationConfig = config.rotation;
if (rotationConfig.maxFileSize.isPresent() && rotationConfig.fileSuffix.isPresent()) {
PeriodicSizeRotatingFileHandler periodicSizeRotatingFileHandler = new PeriodicSizeRotatingFileHandler();
periodicSizeRotatingFileHandler.setSuffix(rotationConfig.fileSuffix.get());
periodicSizeRotatingFileHandler.setRotateSize(rotationConfig.maxFileSize.get().asLongValue());
periodicSizeRotatingFileHandler.setRotateOnBoot(rotationConfig.rotateOnBoot);
periodicSizeRotatingFileHandler.setMaxBackupIndex(rotationConfig.maxBackupIndex);
handler = periodicSizeRotatingFileHandler;
} else if (rotationConfig.maxFileSize.isPresent()) {
SizeRotatingFileHandler sizeRotatingFileHandler = new SizeRotatingFileHandler(
rotationConfig.maxFileSize.get().asLongValue(), rotationConfig.maxBackupIndex);
sizeRotatingFileHandler.setRotateOnBoot(rotationConfig.rotateOnBoot);
handler = sizeRotatingFileHandler;
} else if (rotationConfig.fileSuffix.isPresent()) {
PeriodicRotatingFileHandler periodicRotatingFileHandler = new PeriodicRotatingFileHandler();
periodicRotatingFileHandler.setSuffix(rotationConfig.fileSuffix.get());
handler = periodicRotatingFileHandler;
}
final PatternFormatter formatter = new PatternFormatter(config.format);
handler.setFormatter(formatter);
handler.setAppend(true);
try {
handler.setFile(config.path);
} catch (FileNotFoundException e) {
errorManager.error("Failed to set log file", e, ErrorManager.OPEN_FAILURE);
}
handler.setErrorManager(errorManager);
handler.setLevel(config.level);
handler.setFilter(new LogCleanupFilter(filterElements));
if (config.async.enable) {
return createAsyncHandler(config.async, config.level, handler);
}
return handler;
}
private static Handler configureSyslogHandler(final SyslogConfig config,
final ErrorManager errorManager,
final List<LogCleanupFilterElement> filterElements) {
try {
final SyslogHandler handler = new SyslogHandler(config.endpoint.getHostString(), config.endpoint.getPort());
handler.setAppName(config.appName.orElse(getProcessName()));
handler.setHostname(config.hostname.orElse(getQualifiedHostName()));
handler.setFacility(config.facility);
handler.setSyslogType(config.syslogType);
handler.setProtocol(config.protocol);
handler.setBlockOnReconnect(config.blockOnReconnect);
handler.setTruncate(config.truncate);
handler.setUseCountingFraming(config.useCountingFraming);
handler.setLevel(config.level);
final PatternFormatter formatter = new PatternFormatter(config.format);
handler.setFormatter(formatter);
handler.setErrorManager(errorManager);
handler.setFilter(new LogCleanupFilter(filterElements));
if (config.async.enable) {
return createAsyncHandler(config.async, config.level, handler);
}
return handler;
} catch (IOException e) {
errorManager.error("Failed to create syslog handler", e, ErrorManager.OPEN_FAILURE);
return null;
}
}
private static AsyncHandler createAsyncHandler(AsyncConfig asyncConfig, Level level, Handler handler) {
final AsyncHandler asyncHandler = new AsyncHandler(asyncConfig.queueLength);
asyncHandler.setOverflowAction(asyncConfig.overflow);
asyncHandler.addHandler(handler);
asyncHandler.setLevel(level);
return asyncHandler;
}
}
|
[
"\"ConEmuANSI\"",
"\"PWD\"",
"\"PWD\"",
"\"TERM\"",
"\"MSYSTEM\"",
"\"MSYSTEM\"",
"\"TERM\""
] |
[] |
[
"MSYSTEM",
"PWD",
"ConEmuANSI",
"TERM"
] |
[]
|
["MSYSTEM", "PWD", "ConEmuANSI", "TERM"]
|
java
| 4 | 0 | |
python/mxnet/image/image.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
"""Read and decode an image to an NDArray.
Note: `imread` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imread` to work.
Parameters
----------
filename : str
Name of the image file to be loaded.
flag : {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
to_rgb : bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> mx.img.imread("flower.jpg")
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> mx.img.imread("flower.jpg", flag=0)
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> mx.img.imread("flower.jpg", to_rgb=0)
<NDArray 224x224x3 @cpu(0)>
"""
return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
"""Decode an image to an NDArray.
Note: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
"""
if not isinstance(buf, nd.NDArray):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
"""Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640,106)
"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w * sh) / h, sh
if sw < w:
w, h = sw, float(h * sw) / w
return int(w), int(h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap random interp method selection
and an auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random selection from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
"""Resizes shorter edge to size.
Note: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random selection from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
An 'NDArray' containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
<NDArray 640x960x3 @cpu(0)>
"""
h, w, _ = src.shape
if h > w:
new_h, new_w = size * h // w, size
else:
new_h, new_w = size, size * w // h
return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
"""
out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
if size is not None and (w, h) != size:
sizes = (h, w, size[1], size[0])
out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
return out
def random_crop(src, size, interp=2):
"""Randomly crop `src` with `size` (width, height).
Upsample result if `src` is smaller than `size`.
Parameters
----------
src: Source image `NDArray`
size: Size of the crop formatted as (width, height). If the `size` is larger
than the image, then the source image is upsampled to `size` and returned.
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
Example
-------
>>> im = mx.nd.array(cv2.imread("flower.jpg"))
>>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
>>> print cropped_im
<NDArray 100x100x1 @cpu(0)>
>>> print rect
(20, 21, 100, 100)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def center_crop(src, size, interp=2):
"""Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = int((w - new_w) / 2)
y0 = int((h - new_h) / 2)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image.
"""
if mean is not None:
src -= mean
if std is not None:
src /= std
return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
"""Randomly crop src with size. Randomize area and aspect ratio.
Parameters
----------
src : NDArray
Input image
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
"""
h, w, _ = src.shape
src_area = h * w
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
area = kwargs.pop('min_area')
assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
if isinstance(area, numeric_types):
area = (area, 1.0)
for _ in range(10):
target_area = random.uniform(area[0], area[1]) * src_area
new_ratio = random.uniform(*ratio)
new_w = int(round(np.sqrt(target_area * new_ratio)))
new_h = int(round(np.sqrt(target_area / new_ratio)))
if random.random() < 0.5:
new_h, new_w = new_w, new_h
if new_w <= w and new_h <= h:
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
# fall back to center_crop
return center_crop(src, size, interp)
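# Usage sketch (hypothetical image file, mirroring the Example blocks of the other helpers):
#
#   >>> im = mx.image.imread("flower.jpg")
#   >>> cropped, (x, y, w, h) = mx.image.random_size_crop(im, (224, 224), area=0.08, ratio=(3.0/4.0, 4.0/3.0))
#   >>> cropped.shape
#   (224, 224, 3)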
class Augmenter(object):
"""Image Augmenter base class"""
def __init__(self, **kwargs):
self._kwargs = kwargs
for k, v in self._kwargs.items():
if isinstance(v, nd.NDArray):
v = v.asnumpy()
if isinstance(v, np.ndarray):
v = v.tolist()
self._kwargs[k] = v
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
def __call__(self, src):
"""Abstract implementation body"""
raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
"""Composing a sequential augmenter list.
Parameters
----------
ts : list of augmenters
A series of augmenters to be applied in sequential order.
"""
def __init__(self, ts):
super(SequentialAug, self).__init__()
self.ts = ts
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
def __call__(self, src):
"""Augmenter body"""
for aug in self.ts:
src = aug(src)
return src
class ResizeAug(Augmenter):
"""Make resize shorter edge to size augmenter.
Parameters
----------
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(ResizeAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
"""Force resize to size regardless of aspect ratio
Parameters
----------
size : tuple of (int, int)
The desired size as in (width, height)
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(ForceResizeAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])
return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))
class RandomCropAug(Augmenter):
"""Make random crop augmenter
Parameters
----------
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(RandomCropAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return random_crop(src, self.size, self.interp)[0]
class RandomSizedCropAug(Augmenter):
"""Make random crop with random resizing and random aspect ratio jitter augmenter.
Parameters
----------
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, area, ratio, interp=2, **kwargs):
super(RandomSizedCropAug, self).__init__(size=size, area=area,
ratio=ratio, interp=interp)
self.size = size
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
self.area = kwargs.pop('min_area')
else:
self.area = area
self.ratio = ratio
self.interp = interp
assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."
def __call__(self, src):
"""Augmenter body"""
return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]
class CenterCropAug(Augmenter):
"""Make center crop augmenter.
Parameters
----------
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(CenterCropAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return center_crop(src, self.size, self.interp)[0]
class RandomOrderAug(Augmenter):
"""Apply list of augmenters in random order
Parameters
----------
ts : list of augmenters
A series of augmenters to be applied in random order
"""
def __init__(self, ts):
super(RandomOrderAug, self).__init__()
self.ts = ts
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
def __call__(self, src):
"""Augmenter body"""
random.shuffle(self.ts)
for t in self.ts:
src = t(src)
return src
class BrightnessJitterAug(Augmenter):
"""Random brightness jitter augmentation.
Parameters
----------
brightness : float
The brightness jitter ratio range, [0, 1]
"""
def __init__(self, brightness):
super(BrightnessJitterAug, self).__init__(brightness=brightness)
self.brightness = brightness
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.brightness, self.brightness)
src *= alpha
return src
class ContrastJitterAug(Augmenter):
"""Random contrast jitter augmentation.
Parameters
----------
contrast : float
The contrast jitter ratio range, [0, 1]
"""
def __init__(self, contrast):
super(ContrastJitterAug, self).__init__(contrast=contrast)
self.contrast = contrast
self.coef = nd.array([[[0.299, 0.587, 0.114]]])
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.contrast, self.contrast)
gray = src * self.coef
gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
src *= alpha
src += gray
return src
class SaturationJitterAug(Augmenter):
"""Random saturation jitter augmentation.
Parameters
----------
saturation : float
The saturation jitter ratio range, [0, 1]
"""
def __init__(self, saturation):
super(SaturationJitterAug, self).__init__(saturation=saturation)
self.saturation = saturation
self.coef = nd.array([[[0.299, 0.587, 0.114]]])
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.saturation, self.saturation)
gray = src * self.coef
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
class HueJitterAug(Augmenter):
"""Random hue jitter augmentation.
Parameters
----------
hue : float
The hue jitter ratio range, [0, 1]
"""
def __init__(self, hue):
super(HueJitterAug, self).__init__(hue=hue)
self.hue = hue
self.tyiq = np.array([[0.299, 0.587, 0.114],
[0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
self.ityiq = np.array([[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
def __call__(self, src):
"""Augmenter body.
Using the approximate linear transformation described in:
https://beesbuzz.biz/code/hsv_color_transforms.php
"""
alpha = random.uniform(-self.hue, self.hue)
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0],
[0.0, u, -w],
[0.0, w, u]])
t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
src = nd.dot(src, nd.array(t))
return src
class ColorJitterAug(RandomOrderAug):
"""Apply random brightness, contrast and saturation jitter in random order.
Parameters
----------
brightness : float
The brightness jitter ratio range, [0, 1]
contrast : float
The contrast jitter ratio range, [0, 1]
saturation : float
The saturation jitter ratio range, [0, 1]
"""
def __init__(self, brightness, contrast, saturation):
ts = []
if brightness > 0:
ts.append(BrightnessJitterAug(brightness))
if contrast > 0:
ts.append(ContrastJitterAug(contrast))
if saturation > 0:
ts.append(SaturationJitterAug(saturation))
super(ColorJitterAug, self).__init__(ts)
class LightingAug(Augmenter):
"""Add PCA based noise.
Parameters
----------
alphastd : float
Noise level
eigval : 3x1 np.array
Eigen values
eigvec : 3x3 np.array
Eigen vectors
"""
def __init__(self, alphastd, eigval, eigvec):
super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, src):
"""Augmenter body"""
alpha = np.random.normal(0, self.alphastd, size=(3,))
rgb = np.dot(self.eigvec * alpha, self.eigval)
src += nd.array(rgb)
return src
class ColorNormalizeAug(Augmenter):
"""Mean and std normalization.
Parameters
----------
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
"""
def __init__(self, mean, std):
super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)
self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)
def __call__(self, src):
"""Augmenter body"""
return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
"""Randomly convert to gray image.
Parameters
----------
p : float
Probability to convert to grayscale
"""
def __init__(self, p):
super(RandomGrayAug, self).__init__(p=p)
self.p = p
self.mat = nd.array([[0.21, 0.21, 0.21],
[0.72, 0.72, 0.72],
[0.07, 0.07, 0.07]])
def __call__(self, src):
"""Augmenter body"""
if random.random() < self.p:
src = nd.dot(src, self.mat)
return src
class HorizontalFlipAug(Augmenter):
"""Random horizontal flip.
Parameters
----------
p : float
Probability to flip image horizontally
"""
def __init__(self, p):
super(HorizontalFlipAug, self).__init__(p=p)
self.p = p
def __call__(self, src):
"""Augmenter body"""
if random.random() < self.p:
src = nd.flip(src, axis=1)
return src
class CastAug(Augmenter):
"""Cast to float32"""
def __init__(self, typ='float32'):
super(CastAug, self).__init__(type=typ)
self.typ = typ
def __call__(self, src):
"""Augmenter body"""
src = src.astype(self.typ)
return src
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
pca_noise=0, rand_gray=0, inter_method=2):
"""Creates an augmenter list.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize shorter edge to this value at the beginning, if larger than 0
rand_crop : bool
Whether to enable random cropping other than center crop
rand_resize : bool
Whether to enable random sized cropping, require rand_crop to be enabled
rand_gray : float
[0, 1], probability to convert the image to grayscale across all channels; the
number of channels will not be reduced to 1
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
Pca noise level (percent)
inter_method : int, default=2(Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
... saturation=0.125, pca_noise=0.05, inter_method=10)
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
"""
auglist = []
if resize > 0:
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if brightness or contrast or saturation:
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if hue:
auglist.append(HueJitterAug(hue))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if rand_gray > 0:
auglist.append(RandomGrayAug(rand_gray))
if mean is True:
mean = nd.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
if std is True:
std = nd.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(ColorNormalizeAug(mean, std))
return auglist
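# Illustrative sketch (added commentary, not part of the original module):
# chaining the augmenters returned by CreateAugmenter over a decoded image.
# The augmentation settings below are assumptions chosen for illustration.
def _apply_augmenters_example(img):
    augs = CreateAugmenter(data_shape=(3, 224, 224), rand_crop=True,
                           rand_mirror=True, mean=True, std=True)
    for aug in augs:
        img = aug(img)
    return img  # float32 HxWxC image: cropped, possibly flipped, normalized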
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augmentation choices.
This iterator supports reading from both .rec files and raw image files.
To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
files, use `path_imglist` and `path_root` parameters.
To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
Parameters
----------
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
For now, only RGB images with 3 channels are supported.
label_width : int, optional
Number of labels per example. The default label width is 1.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist: list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Label name for provided symbols.
dtype : str
Label data type. Default: float32. Other options: int32, int64, float64
last_batch_handle : str, optional
How to handle the last batch.
This parameter can be 'pad'(default), 'discard' or 'roll_over'.
If 'pad', the last batch will be padded with data starting from the beginning
If 'discard', the last batch will be discarded
If 'roll_over', the remaining elements will be rolled over to the next iteration
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateAugmenter.
"""
def __init__(self, batch_size, data_shape, label_width=1,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
data_name='data', label_name='softmax_label', dtype='float32',
last_batch_handle='pad', **kwargs):
super(ImageIter, self).__init__()
assert path_imgrec or path_imglist or (isinstance(imglist, list))
assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
logging.info('Using %s threads for decoding...', str(num_threads))
logging.info('Set environment variable MXNET_CPU_WORKER_NTHREADS to a'
' larger number to use more threads.')
class_name = self.__class__.__name__
if path_imgrec:
logging.info('%s: loading recordio %s...',
class_name, path_imgrec)
if path_imgidx:
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = list(self.imgrec.keys)
else:
self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = None
else:
self.imgrec = None
if path_imglist:
logging.info('%s: loading image list %s...', class_name, path_imglist)
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array(line[1:-1], dtype=dtype)
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
logging.info('%s: loading image list...', class_name)
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index) # pylint: disable=redefined-variable-type
index += 1
if len(img) > 2:
label = nd.array(img[:-1], dtype=dtype)
elif isinstance(img[0], numeric_types):
label = nd.array([img[0]], dtype=dtype)
else:
label = nd.array(img[0], dtype=dtype)
result[key] = (label, img[-1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
if label_width > 1:
self.provide_label = [(label_name, (batch_size, label_width))]
else:
self.provide_label = [(label_name, (batch_size,))]
self.batch_size = batch_size
self.data_shape = data_shape
self.label_width = label_width
self.shuffle = shuffle
if self.imgrec is None:
self.seq = imgkeys
elif shuffle or num_parts > 1:
assert self.imgidx is not None
self.seq = self.imgidx
else:
self.seq = None
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
C = N // num_parts
self.seq = self.seq[part_index * C:(part_index + 1) * C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self._allow_read = True
self.last_batch_handle = last_batch_handle
self.num_image = len(self.seq) if self.seq is not None else None
self._cache_data = None
self._cache_label = None
self._cache_idx = None
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.last_batch_handle != 'roll_over' or \
self._cache_data is None:
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
if self._allow_read is False:
self._allow_read = True
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
def next_sample(self):
"""Helper function for reading in next sample."""
if self._allow_read is False:
raise StopIteration
if self.seq is not None:
if self.cur < self.num_image:
idx = self.seq[self.cur]
else:
if self.last_batch_handle != 'discard':
self.cur = 0
raise StopIteration
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
if self.last_batch_handle != 'discard':
self.imgrec.reset()
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def _batchify(self, batch_data, batch_label, start=0):
"""Helper function for batchifying data"""
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
return i
def next(self):
"""Returns the next batch of data."""
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and label have values
assert self._cache_label is not None, "_cache_label didn't have values"
assert self._cache_idx is not None, "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
# clear the cache data
else:
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = self._batchify(batch_data, batch_label)
# calculate the padding
pad = batch_size - i
# handle padding for the last batch
if pad != 0:
if self.last_batch_handle == 'discard':
raise StopIteration
# if the option is 'roll_over', throw StopIteration and cache the data
elif self.last_batch_handle == 'roll_over' and \
self._cache_data is None:
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if self.last_batch_handle == 'pad':
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = aug(data)
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
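# Illustrative sketch (added commentary, not part of the original module):
# reading batches from a RecordIO file with ImageIter. The .rec/.idx paths
# below are placeholders.
def _image_iter_example():
    data_iter = ImageIter(batch_size=32, data_shape=(3, 224, 224),
                          path_imgrec='data/train.rec',
                          path_imgidx='data/train.idx',
                          shuffle=True, rand_crop=True, rand_mirror=True)
    batch = data_iter.next()
    return batch.data[0], batch.label[0]  # (32, 3, 224, 224) and (32,)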
|
[] |
[] |
[
"MXNET_CPU_WORKER_NTHREADS"
] |
[]
|
["MXNET_CPU_WORKER_NTHREADS"]
|
python
| 1 | 0 | |
bucket_test.go
|
package bbolt_test
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"testing/quick"
bolt "github.com/prysmaticlabs/bbolt"
)
// Ensure that a bucket that gets a non-existent key returns nil.
func TestBucket_Get_NonExistent(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if v := b.Get([]byte("foo")); v != nil {
t.Fatal("expected nil value")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can read a value that is not flushed yet.
func TestBucket_Get_FromNode(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
t.Fatalf("unexpected value: %v", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that Get() on a key that holds a nested bucket returns nil.
func TestBucket_Get_IncompatibleValue(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
t.Fatal(err)
}
if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil {
t.Fatal("expected nil value")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a slice returned from a bucket has a capacity equal to its length.
// This also allows slices to be appended to since it will require a realloc by Go.
//
// https://github.com/boltdb/bolt/issues/544
func TestBucket_Get_Capacity(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
// Write key to a bucket.
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("bucket"))
if err != nil {
return err
}
return b.Put([]byte("key"), []byte("val"))
}); err != nil {
t.Fatal(err)
}
// Retrieve value and attempt to append to it.
if err := db.Update(func(tx *bolt.Tx) error {
k, v := tx.Bucket([]byte("bucket")).Cursor().First()
// Verify capacity.
if len(k) != cap(k) {
t.Fatalf("unexpected key slice capacity: %d", cap(k))
} else if len(v) != cap(v) {
t.Fatalf("unexpected value slice capacity: %d", cap(v))
}
// Ensure slice can be appended to without a segfault.
k = append(k, []byte("123")...)
v = append(v, []byte("123")...)
_, _ = k, v // to pass ineffassign
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can write a key/value.
func TestBucket_Put(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
if !bytes.Equal([]byte("bar"), v) {
t.Fatalf("unexpected value: %v", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can rewrite a key in the same transaction.
func TestBucket_Put_Repeat(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
t.Fatal(err)
}
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
if !bytes.Equal([]byte("baz"), value) {
t.Fatalf("unexpected value: %v", value)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can write a bunch of large values.
func TestBucket_Put_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
count, factor := 100, 200
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 1; i < count; i++ {
if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i < count; i++ {
value := b.Get([]byte(strings.Repeat("0", i*factor)))
if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) {
t.Fatalf("unexpected value: %v", value)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a database can perform multiple large appends safely.
func TestDB_Put_VeryLarge(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
n, batchN := 400000, 200000
ksize, vsize := 8, 500
db := MustOpenDB()
defer db.MustClose()
for i := 0; i < n; i += batchN {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for j := 0; j < batchN; j++ {
k, v := make([]byte, ksize), make([]byte, vsize)
binary.BigEndian.PutUint32(k, uint32(i+j))
if err := b.Put(k, v); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
}
// Ensure that setting a value on a key with a bucket value returns an error.
func TestBucket_Put_IncompatibleValue(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b0, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
t.Fatal(err)
}
if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that setting a value while the transaction is closed returns an error.
func TestBucket_Put_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
tx, err := db.Begin(true)
if err != nil {
t.Fatal(err)
}
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := tx.Rollback(); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
// Ensure that setting a value on a read-only bucket returns an error.
func TestBucket_Put_ReadOnly(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can delete an existing key.
func TestBucket_Delete(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
if err := b.Delete([]byte("foo")); err != nil {
t.Fatal(err)
}
if v := b.Get([]byte("foo")); v != nil {
t.Fatalf("unexpected value: %v", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a large set of keys will work correctly.
func TestBucket_Delete_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 100; i++ {
if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
if err := b.Delete([]byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
if v := b.Get([]byte(strconv.Itoa(i))); v != nil {
t.Fatalf("unexpected value: %v, i=%d", v, i)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Deleting a very large list of keys will cause the freelist to use overflow.
func TestBucket_Delete_FreelistOverflow(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
db := MustOpenDB()
defer db.MustClose()
k := make([]byte, 16)
for i := uint64(0); i < 10000; i++ {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("0"))
if err != nil {
t.Fatalf("bucket error: %s", err)
}
for j := uint64(0); j < 1000; j++ {
binary.BigEndian.PutUint64(k[:8], i)
binary.BigEndian.PutUint64(k[8:], j)
if err := b.Put(k, nil); err != nil {
t.Fatalf("put error: %s", err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Delete all of them in one large transaction
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("0"))
c := b.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if err := c.Delete(); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Check that more than an overflow's worth of pages are freed.
stats := db.Stats()
freePages := stats.FreePageN + stats.PendingPageN
if freePages <= 0xFFFF {
t.Fatalf("expected more than 0xFFFF free pages, got %v", freePages)
}
// Free page count should be preserved on reopen.
if err := db.DB.Close(); err != nil {
t.Fatal(err)
}
db.MustReopen()
if reopenFreePages := db.Stats().FreePageN; freePages != reopenFreePages {
t.Fatalf("expected %d free pages, got %+v", freePages, db.Stats())
}
}
// Ensure that deleting of non-existing key is a no-op.
func TestBucket_Delete_NonExisting(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if _, err = b.CreateBucket([]byte("nested")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
if err := b.Delete([]byte("foo")); err != nil {
t.Fatal(err)
}
if b.Bucket([]byte("nested")) == nil {
t.Fatal("nested bucket has been deleted")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that accessing and updating nested buckets is ok across transactions.
func TestBucket_Nested(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
// Create a widgets bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
// Create a widgets/foo bucket.
_, err = b.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
// Create a widgets/bar key.
if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Update widgets/bar.
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Cause a split.
if err := db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
for i := 0; i < 10000; i++ {
if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Insert into widgets/foo/baz.
if err := db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
// Verify.
if err := db.View(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) {
t.Fatalf("unexpected value: %v", v)
}
if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) {
t.Fatalf("unexpected value: %v", v)
}
for i := 0; i < 10000; i++ {
if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) {
t.Fatalf("unexpected value: %v", v)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a bucket using Delete() returns an error.
func TestBucket_Delete_Bucket(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if _, err := b.CreateBucket([]byte("foo")); err != nil {
t.Fatal(err)
}
if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a key on a read-only bucket returns an error.
func TestBucket_Delete_ReadOnly(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.View(func(tx *bolt.Tx) error {
if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a value while the transaction is closed returns an error.
func TestBucket_Delete_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
tx, err := db.Begin(true)
if err != nil {
t.Fatal(err)
}
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := tx.Rollback(); err != nil {
t.Fatal(err)
}
if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
// Ensure that deleting a bucket causes nested buckets to be deleted.
func TestBucket_DeleteBucket_Nested(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
foo, err := widgets.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
bar, err := foo.CreateBucket([]byte("bar"))
if err != nil {
t.Fatal(err)
}
if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
t.Fatal(err)
}
if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
func TestBucket_DeleteBucket_Nested2(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
foo, err := widgets.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
bar, err := foo.CreateBucket([]byte("bar"))
if err != nil {
t.Fatal(err)
}
if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
widgets := tx.Bucket([]byte("widgets"))
if widgets == nil {
t.Fatal("expected widgets bucket")
}
foo := widgets.Bucket([]byte("foo"))
if foo == nil {
t.Fatal("expected foo bucket")
}
bar := foo.Bucket([]byte("bar"))
if bar == nil {
t.Fatal("expected bar bucket")
}
if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
if err := tx.DeleteBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.View(func(tx *bolt.Tx) error {
if tx.Bucket([]byte("widgets")) != nil {
t.Fatal("expected bucket to be deleted")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
// NOTE: The consistency check in bolt_test.DB.Close() will panic if pages are not freed properly.
func TestBucket_DeleteBucket_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
foo, err := widgets.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
if err := tx.DeleteBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that Bucket() on a key that holds a simple (non-bucket) value returns nil.
func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil {
t.Fatal("expected nil bucket")
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that creating a bucket on an existing non-bucket key returns an error.
func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that deleting a bucket on an existing non-bucket key returns an error.
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure bucket can set and update its sequence number.
func TestBucket_Sequence(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
bkt, err := tx.CreateBucket([]byte("0"))
if err != nil {
t.Fatal(err)
}
// Retrieve sequence.
if v := bkt.Sequence(); v != 0 {
t.Fatalf("unexpected sequence: %d", v)
}
// Update sequence.
if err := bkt.SetSequence(1000); err != nil {
t.Fatal(err)
}
// Read sequence again.
if v := bkt.Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
// Verify sequence in separate transaction.
if err := db.View(func(tx *bolt.Tx) error {
if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can return an autoincrementing sequence.
func TestBucket_NextSequence(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
woojits, err := tx.CreateBucket([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
// Make sure sequence increments.
if seq, err := widgets.NextSequence(); err != nil {
t.Fatal(err)
} else if seq != 1 {
t.Fatalf("unexpecte sequence: %d", seq)
}
if seq, err := widgets.NextSequence(); err != nil {
t.Fatal(err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
// Buckets should be separate.
if seq, err := woojits.NextSequence(); err != nil {
t.Fatal(err)
} else if seq != 1 {
t.Fatalf("unexpected sequence: %d", 1)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket will persist an autoincrementing sequence even if it's
// the only thing updated on the bucket.
// https://github.com/boltdb/bolt/issues/296
func TestBucket_NextSequence_Persist(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
seq, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != nil {
t.Fatalf("unexpected error: %s", err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that retrieving the next sequence on a read-only bucket returns an error.
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.View(func(tx *bolt.Tx) error {
_, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != bolt.ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that retrieving the next sequence for a bucket on a closed database returns an error.
func TestBucket_NextSequence_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
tx, err := db.Begin(true)
if err != nil {
t.Fatal(err)
}
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := tx.Rollback(); err != nil {
t.Fatal(err)
}
if _, err := b.NextSequence(); err != bolt.ErrTxClosed {
t.Fatal(err)
}
}
// Ensure a user can loop over all key/value pairs in a bucket.
func TestBucket_ForEach(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("baz"), []byte("0001")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
t.Fatal(err)
}
var index int
if err := b.ForEach(func(k, v []byte) error {
switch index {
case 0:
if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0002")) {
t.Fatalf("unexpected value: %v", v)
}
case 1:
if !bytes.Equal(k, []byte("baz")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0001")) {
t.Fatalf("unexpected value: %v", v)
}
case 2:
if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0000")) {
t.Fatalf("unexpected value: %v", v)
}
}
index++
return nil
}); err != nil {
t.Fatal(err)
}
if index != 3 {
t.Fatalf("unexpected index: %d", index)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a database can stop iteration early.
func TestBucket_ForEach_ShortCircuit(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("baz"), []byte("0000")); err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
t.Fatal(err)
}
var index int
if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
index++
if bytes.Equal(k, []byte("baz")) {
return errors.New("marker")
}
return nil
}); err == nil || err.Error() != "marker" {
t.Fatalf("unexpected error: %s", err)
}
if index != 2 {
t.Fatalf("unexpected index: %d", index)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that looping over a bucket on a closed database returns an error.
func TestBucket_ForEach_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
tx, err := db.Begin(true)
if err != nil {
t.Fatal(err)
}
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := tx.Rollback(); err != nil {
t.Fatal(err)
}
if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
// Ensure that an error is returned when inserting with an empty key.
func TestBucket_Put_EmptyKey(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired {
t.Fatalf("unexpected error: %s", err)
}
if err := b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that an error is returned when inserting with a key that's too large.
func TestBucket_Put_KeyTooLarge(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that an error is returned when inserting a value that's too large.
func TestBucket_Put_ValueTooLarge(t *testing.T) {
// Skip this test on DroneCI because the machine is resource constrained.
if os.Getenv("DRONE") == "true" {
t.Skip("not enough RAM for test")
}
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge {
t.Fatalf("unexpected error: %s", err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
// Add bucket with fewer keys but one big value.
bigKey := []byte("really-big-value")
for i := 0; i < 500; i++ {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
if err := db.Update(func(tx *bolt.Tx) error {
if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
stats := tx.Bucket([]byte("woojits")).Stats()
if stats.BranchPageN != 1 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 7 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 2 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 501 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 2 {
t.Fatalf("unexpected Depth: %d", stats.Depth)
}
branchInuse := 16 // branch page header
branchInuse += 7 * 16 // branch elements
branchInuse += 7 * 3 // branch keys (7 3-byte keys)
if stats.BranchInuse != branchInuse {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
}
leafInuse := 7 * 16 // leaf page header
leafInuse += 501 * 16 // leaf elements
leafInuse += 500*3 + len(bigKey) // leaf keys
leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
if stats.LeafInuse != leafInuse {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
// Only check allocations for 4KB pages.
if db.Info().PageSize == 4096 {
if stats.BranchAlloc != 4096 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 36864 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 0 {
t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 0 {
t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket with random insertion utilizes fill percentage correctly.
func TestBucket_Stats_RandomFill(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
} else if os.Getpagesize() != 4096 {
t.Skip("invalid page size for test")
}
db := MustOpenDB()
defer db.MustClose()
// Add a set of values in random order. It will be the same random
// order so we can maintain consistency between test runs.
var count int
rand := rand.New(rand.NewSource(42))
for _, i := range rand.Perm(1000) {
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
b.FillPercent = 0.9
for _, j := range rand.Perm(100) {
index := (j * 10000) + i
if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil {
t.Fatal(err)
}
count++
}
return nil
}); err != nil {
t.Fatal(err)
}
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
stats := tx.Bucket([]byte("woojits")).Stats()
if stats.KeyN != 100000 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
}
if stats.BranchPageN != 98 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.BranchInuse != 130984 {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.BranchAlloc != 401408 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
}
if stats.LeafPageN != 3412 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.LeafInuse != 4742482 {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
} else if stats.LeafAlloc != 13975552 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats_Small(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
// Add a bucket that fits on a single root leaf.
b, err := tx.CreateBucket([]byte("whozawhats"))
if err != nil {
t.Fatal(err)
}
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats()
if stats.BranchPageN != 0 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 0 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 1 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 1 {
t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 0 {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.LeafInuse != 0 {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if db.Info().PageSize == 4096 {
if stats.BranchAlloc != 0 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 0 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 1 {
t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 16+16+6 {
t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
func TestBucket_Stats_EmptyBucket(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
// Add a bucket that fits on a single root leaf.
if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats()
if stats.BranchPageN != 0 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 0 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 0 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 1 {
t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 0 {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.LeafInuse != 0 {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if db.Info().PageSize == 4096 {
if stats.BranchAlloc != 0 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 0 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 1 {
t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 16 {
t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats_Nested(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("foo"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 100; i++ {
if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil {
t.Fatal(err)
}
}
bar, err := b.CreateBucket([]byte("bar"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
}
baz, err := bar.CreateBucket([]byte("baz"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("foo"))
stats := b.Stats()
if stats.BranchPageN != 0 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 2 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 122 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 3 {
t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 0 {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
}
foo := 16 // foo (pghdr)
foo += 101 * 16 // foo leaf elements
foo += 100*2 + 100*2 // foo leaf key/values
foo += 3 + 16 // foo -> bar key/value
bar := 16 // bar (pghdr)
bar += 11 * 16 // bar leaf elements
bar += 10 + 10 // bar leaf key/values
bar += 3 + 16 // bar -> baz key/value
baz := 16 // baz (inline) (pghdr)
baz += 10 * 16 // baz leaf elements
baz += 10 + 10 // baz leaf key/values
if stats.LeafInuse != foo+bar+baz {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if db.Info().PageSize == 4096 {
if stats.BranchAlloc != 0 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 8192 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 3 {
t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 1 {
t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != baz {
t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure a large bucket can calculate stats.
func TestBucket_Stats_Large(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
db := MustOpenDB()
defer db.MustClose()
var index int
for i := 0; i < 100; i++ {
// Add bucket with lots of keys.
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil {
t.Fatal(err)
}
index++
}
return nil
}); err != nil {
t.Fatal(err)
}
}
db.MustCheck()
if err := db.View(func(tx *bolt.Tx) error {
stats := tx.Bucket([]byte("widgets")).Stats()
if stats.BranchPageN != 13 {
t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 1196 {
t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 100000 {
t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 3 {
t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 25257 {
t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.LeafInuse != 2596916 {
t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if db.Info().PageSize == 4096 {
if stats.BranchAlloc != 53248 {
t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 4898816 {
t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 0 {
t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 0 {
t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can write random keys and values across multiple transactions.
func TestBucket_Put_Single(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
index := 0
if err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
m := make(map[string][]byte)
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
for _, item := range items {
if err := db.Update(func(tx *bolt.Tx) error {
if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
panic("put error: " + err.Error())
}
m[string(item.Key)] = item.Value
return nil
}); err != nil {
t.Fatal(err)
}
// Verify all key/values so far.
if err := db.View(func(tx *bolt.Tx) error {
i := 0
for k, v := range m {
value := tx.Bucket([]byte("widgets")).Get([]byte(k))
if !bytes.Equal(value, v) {
t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
db.CopyTempFile()
t.FailNow()
}
i++
}
return nil
}); err != nil {
t.Fatal(err)
}
}
index++
return true
}, qconfig()); err != nil {
t.Error(err)
}
}
// Ensure that a transaction can insert multiple key/value pairs at once.
func TestBucket_Put_Multiple(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
if err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
// Bulk insert all values.
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
if err := b.Put(item.Key, item.Value); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Verify all items exist.
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
value := b.Get(item.Key)
if !bytes.Equal(item.Value, value) {
db.CopyTempFile()
t.Fatalf("exp=%x; got=%x", item.Value, value)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
return true
}, qconfig()); err != nil {
t.Error(err)
}
}
// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
func TestBucket_Delete_Quick(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
if err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
// Bulk insert all values.
if err := db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
if err := b.Put(item.Key, item.Value); err != nil {
t.Fatal(err)
}
}
return nil
}); err != nil {
t.Fatal(err)
}
// Remove items one at a time and check consistency.
for _, item := range items {
if err := db.Update(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Delete(item.Key)
}); err != nil {
t.Fatal(err)
}
}
		// The bucket should now be empty.
if err := db.View(func(tx *bolt.Tx) error {
if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
return nil
}); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
return true
}, qconfig()); err != nil {
t.Error(err)
}
}
func ExampleBucket_Put() {
// Open the database.
db, err := bolt.Open(tempfile(), 0666, nil)
if err != nil {
log.Fatal(err)
}
defer os.Remove(db.Path())
// Start a write transaction.
if err := db.Update(func(tx *bolt.Tx) error {
// Create a bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
// Set the value "bar" for the key "foo".
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
return err
}
return nil
}); err != nil {
log.Fatal(err)
}
// Read value back in a different read-only transaction.
if err := db.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value)
return nil
}); err != nil {
log.Fatal(err)
}
// Close database to release file lock.
if err := db.Close(); err != nil {
log.Fatal(err)
}
// Output:
// The value of 'foo' is: bar
}
func ExampleBucket_Delete() {
// Open the database.
db, err := bolt.Open(tempfile(), 0666, nil)
if err != nil {
log.Fatal(err)
}
defer os.Remove(db.Path())
// Start a write transaction.
if err := db.Update(func(tx *bolt.Tx) error {
// Create a bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
// Set the value "bar" for the key "foo".
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
return err
}
// Retrieve the key back from the database and verify it.
value := b.Get([]byte("foo"))
fmt.Printf("The value of 'foo' was: %s\n", value)
return nil
}); err != nil {
log.Fatal(err)
}
// Delete the key in a different write transaction.
if err := db.Update(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
}); err != nil {
log.Fatal(err)
}
// Retrieve the key again.
if err := db.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
if value == nil {
fmt.Printf("The value of 'foo' is now: nil\n")
}
return nil
}); err != nil {
log.Fatal(err)
}
// Close database to release file lock.
if err := db.Close(); err != nil {
log.Fatal(err)
}
// Output:
// The value of 'foo' was: bar
// The value of 'foo' is now: nil
}
func ExampleBucket_ForEach() {
// Open the database.
db, err := bolt.Open(tempfile(), 0666, nil)
if err != nil {
log.Fatal(err)
}
defer os.Remove(db.Path())
// Insert data into a bucket.
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
return err
}
if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
return err
}
if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
return err
}
if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
return err
}
// Iterate over items in sorted key order.
if err := b.ForEach(func(k, v []byte) error {
fmt.Printf("A %s is %s.\n", k, v)
return nil
}); err != nil {
return err
}
return nil
}); err != nil {
log.Fatal(err)
}
// Close database to release file lock.
if err := db.Close(); err != nil {
log.Fatal(err)
}
// Output:
// A cat is lame.
// A dog is fun.
// A liger is awesome.
}
| ["\"DRONE\""] | [] | ["DRONE"] | [] | ["DRONE"] | go | 1 | 0 |
spyder/plugins/editor/plugin.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Editor Plugin"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import logging
import os
import os.path as osp
import re
import sys
import time
# Third party imports
from qtpy.compat import from_qvariant, getopenfilenames, to_qvariant
from qtpy.QtCore import QByteArray, Qt, Signal, Slot
from qtpy.QtPrintSupport import QAbstractPrintDialog, QPrintDialog, QPrinter
from qtpy.QtWidgets import (QAction, QActionGroup, QApplication, QDialog,
QFileDialog, QInputDialog, QMenu, QSplitter,
QToolBar, QVBoxLayout, QWidget)
# Local imports
from spyder.config.base import _, get_conf_path, running_under_pytest
from spyder.config.manager import CONF
from spyder.config.utils import (get_edit_filetypes, get_edit_filters,
get_filter)
from spyder.py3compat import PY2, qbytearray_to_str, to_text_string
from spyder.utils import encoding, programs, sourcecode
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import create_action, add_actions, MENU_SEPARATOR
from spyder.utils.misc import getcwd_or_home
from spyder.widgets.findreplace import FindReplace
from spyder.plugins.editor.confpage import EditorConfigPage
from spyder.plugins.editor.utils.autosave import AutosaveForPlugin
from spyder.plugins.editor.utils.switcher import EditorSwitcherManager
from spyder.plugins.editor.widgets.editor import (EditorMainWindow, Printer,
EditorSplitter, EditorStack,)
from spyder.plugins.editor.widgets.codeeditor import CodeEditor
from spyder.plugins.editor.utils.bookmarks import (load_bookmarks,
save_bookmarks)
from spyder.plugins.editor.utils.debugger import (clear_all_breakpoints,
clear_breakpoint)
from spyder.plugins.editor.widgets.status import (CursorPositionStatus,
EncodingStatus, EOLStatus,
ReadWriteStatus, VCSStatus)
from spyder.api.plugins import SpyderPluginWidget
from spyder.preferences.runconfig import (ALWAYS_OPEN_FIRST_RUN_OPTION,
get_run_configuration,
RunConfigDialog, RunConfigOneDialog)
logger = logging.getLogger(__name__)
WINPDB_PATH = programs.find_program('winpdb')
class Editor(SpyderPluginWidget):
"""
Multi-file Editor widget
"""
CONF_SECTION = 'editor'
CONFIGWIDGET_CLASS = EditorConfigPage
CONF_FILE = False
TEMPFILE_PATH = get_conf_path('temp.py')
TEMPLATE_PATH = get_conf_path('template.py')
DISABLE_ACTIONS_WHEN_HIDDEN = False # SpyderPluginWidget class attribute
# Signals
run_in_current_ipyclient = Signal(str, str, str,
bool, bool, bool, bool, bool)
run_cell_in_ipyclient = Signal(str, object, str, bool)
debug_cell_in_ipyclient = Signal(str, object, str, bool)
exec_in_extconsole = Signal(str, bool)
redirect_stdio = Signal(bool)
open_dir = Signal(str)
breakpoints_saved = Signal()
run_in_current_extconsole = Signal(str, str, str, bool, bool)
open_file_update = Signal(str)
# This signal is fired for any focus change among all editor stacks
sig_editor_focus_changed = Signal()
def __init__(self, parent, ignore_last_opened_files=False):
SpyderPluginWidget.__init__(self, parent)
self.__set_eol_chars = True
# Creating template if it doesn't already exist
if not osp.isfile(self.TEMPLATE_PATH):
if os.name == "nt":
shebang = []
else:
shebang = ['#!/usr/bin/env python' + ('2' if PY2 else '3')]
header = shebang + [
'# -*- coding: utf-8 -*-',
'"""', 'Created on %(date)s', '',
'@author: %(username)s', '"""', '', '']
try:
encoding.write(os.linesep.join(header), self.TEMPLATE_PATH,
'utf-8')
except EnvironmentError:
pass
self.projects = None
self.outlineexplorer = None
self.help = None
self.file_dependent_actions = []
self.pythonfile_dependent_actions = []
self.dock_toolbar_actions = None
self.edit_menu_actions = None #XXX: find another way to notify Spyder
self.stack_menu_actions = None
self.checkable_actions = {}
self.__first_open_files_setup = True
self.editorstacks = []
self.last_focus_editorstack = {}
self.editorwindows = []
self.editorwindows_to_be_created = []
self.toolbar_list = None
self.menu_list = None
# We need to call this here to create self.dock_toolbar_actions,
# which is used below.
self._setup()
self.options_button.hide()
# Configuration dialog size
self.dialog_size = None
statusbar = parent.statusBar() # Create a status bar
# Remove separator line
statusbar.setStyleSheet('QStatusBar::item {border: None;}')
self.vcs_status = VCSStatus(self, statusbar)
self.cursorpos_status = CursorPositionStatus(self, statusbar)
self.encoding_status = EncodingStatus(self, statusbar)
self.eol_status = EOLStatus(self, statusbar)
self.readwrite_status = ReadWriteStatus(self, statusbar)
layout = QVBoxLayout()
self.dock_toolbar = QToolBar(self)
add_actions(self.dock_toolbar, self.dock_toolbar_actions)
layout.addWidget(self.dock_toolbar)
self.last_edit_cursor_pos = None
self.cursor_pos_history = []
self.cursor_pos_index = None
self.__ignore_cursor_position = True
# Completions setup
self.completion_editor_settings = {}
# Setup new windows:
self.main.all_actions_defined.connect(self.setup_other_windows)
# Change module completions when PYTHONPATH changes
self.main.sig_pythonpath_changed.connect(self.set_path)
# Find widget
self.find_widget = FindReplace(self, enable_replace=True)
self.find_widget.hide()
self.find_widget.visibility_changed.connect(
lambda vs: self.rehighlight_cells())
self.register_widget_shortcuts(self.find_widget)
# Start autosave component
# (needs to be done before EditorSplitter)
self.autosave = AutosaveForPlugin(self)
self.autosave.try_recover_from_autosave()
# Multiply by 1000 to convert seconds to milliseconds
self.autosave.interval = self.get_option('autosave_interval') * 1000
self.autosave.enabled = self.get_option('autosave_enabled')
# Tabbed editor widget + Find/Replace widget
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
self.editorsplitter = EditorSplitter(self, self,
self.stack_menu_actions, first=True)
editor_layout.addWidget(self.editorsplitter)
editor_layout.addWidget(self.find_widget)
# Splitter: editor widgets (see above) + outline explorer
self.splitter = QSplitter(self)
self.splitter.setContentsMargins(0, 0, 0, 0)
self.splitter.addWidget(editor_widgets)
self.splitter.setStretchFactor(0, 5)
self.splitter.setStretchFactor(1, 1)
layout.addWidget(self.splitter)
self.setLayout(layout)
self.setFocusPolicy(Qt.ClickFocus)
# Editor's splitter state
state = self.get_option('splitter_state', None)
if state is not None:
self.splitter.restoreState( QByteArray().fromHex(
str(state).encode('utf-8')) )
self.recent_files = self.get_option('recent_files', [])
self.untitled_num = 0
# Parameters of last file execution:
self.__last_ic_exec = None # internal console
self.__last_ec_exec = None # external console
# File types and filters used by the Open dialog
self.edit_filetypes = None
self.edit_filters = None
self.__ignore_cursor_position = False
current_editor = self.get_current_editor()
if current_editor is not None:
filename = self.get_current_filename()
position = current_editor.get_position('cursor')
self.add_cursor_position_to_history(filename, position)
self.update_cursorpos_actions()
self.set_path()
def set_projects(self, projects):
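        """Store a reference to the Projects plugin."""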
self.projects = projects
@Slot()
def show_hide_projects(self):
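        """Toggle the visibility of the Projects dockwidget."""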
if self.projects is not None:
dw = self.projects.dockwidget
if dw.isVisible():
dw.hide()
else:
dw.show()
dw.raise_()
self.switch_to_plugin()
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
for editorstack in self.editorstacks:
# Pass the OutlineExplorer widget to the stacks because they
# don't need the plugin
editorstack.set_outlineexplorer(self.outlineexplorer.explorer)
self.editorstacks[0].initialize_outlineexplorer()
self.outlineexplorer.explorer.edit_goto.connect(
lambda filenames, goto, word:
self.load(filenames=filenames, goto=goto, word=word,
editorwindow=self))
self.outlineexplorer.explorer.edit.connect(
lambda filenames:
self.load(filenames=filenames, editorwindow=self))
def set_help(self, help_plugin):
self.help = help_plugin
for editorstack in self.editorstacks:
editorstack.set_help(self.help)
#------ Private API --------------------------------------------------------
def restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
# Widget is now visible, we may center cursor on top level editor:
try:
self.get_current_editor().centerCursor()
except AttributeError:
pass
@Slot(dict)
def report_open_file(self, options):
"""Request to start a completion server to attend a language."""
filename = options['filename']
language = options['language']
logger.debug('Start completion server for %s [%s]' % (
filename, language))
codeeditor = options['codeeditor']
status = self.main.completions.start_client(language.lower())
self.main.completions.register_file(
language.lower(), filename, codeeditor)
if status:
logger.debug('{0} completion server is ready'.format(language))
codeeditor.start_completion_services()
if language.lower() in self.completion_editor_settings:
codeeditor.update_completion_configuration(
self.completion_editor_settings[language.lower()])
else:
if codeeditor.language == language.lower():
logger.debug('Setting {0} completions off'.format(filename))
codeeditor.completions_available = False
@Slot(dict, str)
def register_completion_server_settings(self, settings, language):
"""Register completion server settings."""
self.completion_editor_settings[language] = dict(settings)
logger.debug('Completion server settings for {!s} are: {!r}'.format(
language, settings))
self.completion_server_settings_ready(
language, self.completion_editor_settings[language])
def stop_completion_services(self, language):
"""Notify all editorstacks about LSP server unavailability."""
for editorstack in self.editorstacks:
editorstack.notify_server_down(language)
def completion_server_ready(self, language):
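        """Notify all editorstacks that the completion server is ready."""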
for editorstack in self.editorstacks:
editorstack.completion_server_ready(language)
def completion_server_settings_ready(self, language, configuration):
"""Notify all stackeditors about LSP server availability."""
for editorstack in self.editorstacks:
editorstack.update_server_configuration(language, configuration)
def send_completion_request(self, language, request, params):
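        """Send a completion request to the completions plugin."""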
logger.debug("%s completion server request: %r" % (language, request))
self.main.completions.send_request(language, request, params)
def kite_completions_file_status(self):
"""Connect open_file_update to Kite's status."""
self.open_file_update.connect(
self.main.completions.get_client('kite').send_status_request)
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
title = _('Editor')
return title
def get_plugin_icon(self):
"""Return widget icon."""
return ima.icon('edit')
def get_focus_widget(self):
"""
Return the widget to give focus to.
This happens when plugin's dockwidget is raised on top-level.
"""
return self.get_current_editor()
def _visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginWidget._visibility_changed(self, enable)
if self.dockwidget is None:
return
if self.dockwidget.isWindow():
self.dock_toolbar.show()
else:
self.dock_toolbar.hide()
if enable:
self.refresh_plugin()
self.sig_update_plugin_title.emit()
def refresh_plugin(self):
"""Refresh editor plugin"""
editorstack = self.get_current_editorstack()
editorstack.refresh()
self.refresh_save_all_action()
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
state = self.splitter.saveState()
self.set_option('splitter_state', qbytearray_to_str(state))
editorstack = self.editorstacks[0]
active_project_path = None
if self.projects is not None:
active_project_path = self.projects.get_active_project_path()
if not active_project_path:
self.set_open_filenames()
else:
self.projects.set_project_filenames(
[finfo.filename for finfo in editorstack.data])
self.set_option('layout_settings',
self.editorsplitter.get_layout_settings())
self.set_option('windows_layout_settings',
[win.get_layout_settings() for win in self.editorwindows])
# self.set_option('filenames', filenames)
self.set_option('recent_files', self.recent_files)
# Stop autosave timer before closing windows
self.autosave.stop_autosave_timer()
try:
if not editorstack.save_if_changed(cancelable) and cancelable:
return False
else:
for win in self.editorwindows[:]:
win.close()
return True
except IndexError:
return True
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
# ---- File menu and toolbar ----
self.new_action = create_action(
self,
_("&New file..."),
icon=ima.icon('filenew'), tip=_("New file"),
triggered=self.new,
context=Qt.WidgetShortcut
)
self.register_shortcut(self.new_action, context="Editor",
name="New file", add_shortcut_to_tip=True)
self.open_last_closed_action = create_action(
self,
_("O&pen last closed"),
tip=_("Open last closed"),
triggered=self.open_last_closed
)
self.register_shortcut(self.open_last_closed_action, context="Editor",
name="Open last closed")
self.open_action = create_action(self, _("&Open..."),
icon=ima.icon('fileopen'), tip=_("Open file"),
triggered=self.load,
context=Qt.WidgetShortcut)
self.register_shortcut(self.open_action, context="Editor",
name="Open file", add_shortcut_to_tip=True)
self.revert_action = create_action(self, _("&Revert"),
icon=ima.icon('revert'), tip=_("Revert file from disk"),
triggered=self.revert)
self.save_action = create_action(self, _("&Save"),
icon=ima.icon('filesave'), tip=_("Save file"),
triggered=self.save,
context=Qt.WidgetShortcut)
self.register_shortcut(self.save_action, context="Editor",
name="Save file", add_shortcut_to_tip=True)
self.save_all_action = create_action(self, _("Sav&e all"),
icon=ima.icon('save_all'), tip=_("Save all files"),
triggered=self.save_all,
context=Qt.WidgetShortcut)
self.register_shortcut(self.save_all_action, context="Editor",
name="Save all", add_shortcut_to_tip=True)
save_as_action = create_action(self, _("Save &as..."), None,
ima.icon('filesaveas'), tip=_("Save current file as..."),
triggered=self.save_as,
context=Qt.WidgetShortcut)
self.register_shortcut(save_as_action, "Editor", "Save As")
save_copy_as_action = create_action(self, _("Save copy as..."), None,
ima.icon('filesaveas'), _("Save copy of current file as..."),
triggered=self.save_copy_as)
print_preview_action = create_action(self, _("Print preview..."),
tip=_("Print preview..."), triggered=self.print_preview)
self.print_action = create_action(self, _("&Print..."),
icon=ima.icon('print'), tip=_("Print current file..."),
triggered=self.print_file)
# Shortcut for close_action is defined in widgets/editor.py
self.close_action = create_action(self, _("&Close"),
icon=ima.icon('fileclose'), tip=_("Close current file"),
triggered=self.close_file)
self.close_all_action = create_action(self, _("C&lose all"),
icon=ima.icon('filecloseall'), tip=_("Close all opened files"),
triggered=self.close_all_files,
context=Qt.WidgetShortcut)
self.register_shortcut(self.close_all_action, context="Editor",
name="Close all")
# ---- Find menu and toolbar ----
_text = _("&Find text")
find_action = create_action(self, _text, icon=ima.icon('find'),
tip=_text, triggered=self.find,
context=Qt.WidgetShortcut)
self.register_shortcut(find_action, context="find_replace",
name="Find text", add_shortcut_to_tip=True)
find_next_action = create_action(self, _("Find &next"),
icon=ima.icon('findnext'),
triggered=self.find_next,
context=Qt.WidgetShortcut)
self.register_shortcut(find_next_action, context="find_replace",
name="Find next")
find_previous_action = create_action(self, _("Find &previous"),
icon=ima.icon('findprevious'),
triggered=self.find_previous,
context=Qt.WidgetShortcut)
self.register_shortcut(find_previous_action, context="find_replace",
name="Find previous")
_text = _("&Replace text")
replace_action = create_action(self, _text, icon=ima.icon('replace'),
tip=_text, triggered=self.replace,
context=Qt.WidgetShortcut)
self.register_shortcut(replace_action, context="find_replace",
name="Replace text")
# ---- Debug menu and toolbar ----
set_clear_breakpoint_action = create_action(self,
_("Set/Clear breakpoint"),
icon=ima.icon('breakpoint_big'),
triggered=self.set_or_clear_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_clear_breakpoint_action, context="Editor",
name="Breakpoint")
set_cond_breakpoint_action = create_action(self,
_("Set/Edit conditional breakpoint"),
icon=ima.icon('breakpoint_cond_big'),
triggered=self.set_or_edit_conditional_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_cond_breakpoint_action, context="Editor",
name="Conditional breakpoint")
clear_all_breakpoints_action = create_action(self,
_('Clear breakpoints in all files'),
triggered=self.clear_all_breakpoints)
pdb_ignore_lib = create_action(
self, _("Ignore Python libraries while debugging"),
toggled=self.toggle_pdb_ignore_lib)
pdb_execute_events = create_action(
self, _("Process execute events while debugging"),
toggled=self.toggle_pdb_execute_events)
self.winpdb_action = create_action(self, _("Debug with winpdb"),
triggered=self.run_winpdb)
self.winpdb_action.setEnabled(WINPDB_PATH is not None and PY2)
# --- Debug toolbar ---
debug_action = create_action(self, _("&Debug"),
icon=ima.icon('debug'),
tip=_("Debug file"),
triggered=self.debug_file)
self.register_shortcut(debug_action, context="_", name="Debug",
add_shortcut_to_tip=True)
debug_next_action = create_action(self, _("Step"),
icon=ima.icon('arrow-step-over'), tip=_("Run current line"),
triggered=lambda: self.debug_command("next"))
self.register_shortcut(debug_next_action, "_", "Debug Step Over",
add_shortcut_to_tip=True)
debug_continue_action = create_action(self, _("Continue"),
icon=ima.icon('arrow-continue'),
tip=_("Continue execution until next breakpoint"),
triggered=lambda: self.debug_command("continue"))
self.register_shortcut(debug_continue_action, "_", "Debug Continue",
add_shortcut_to_tip=True)
debug_step_action = create_action(self, _("Step Into"),
icon=ima.icon('arrow-step-in'),
tip=_("Step into function or method of current line"),
triggered=lambda: self.debug_command("step"))
self.register_shortcut(debug_step_action, "_", "Debug Step Into",
add_shortcut_to_tip=True)
debug_return_action = create_action(self, _("Step Return"),
icon=ima.icon('arrow-step-out'),
tip=_("Run until current function or method returns"),
triggered=lambda: self.debug_command("return"))
self.register_shortcut(debug_return_action, "_", "Debug Step Return",
add_shortcut_to_tip=True)
debug_exit_action = create_action(self, _("Stop"),
icon=ima.icon('stop_debug'), tip=_("Stop debugging"),
triggered=lambda: self.debug_command("exit"))
self.register_shortcut(debug_exit_action, "_", "Debug Exit",
add_shortcut_to_tip=True)
# --- Run toolbar ---
run_action = create_action(self, _("&Run"), icon=ima.icon('run'),
tip=_("Run file"),
triggered=self.run_file)
self.register_shortcut(run_action, context="_", name="Run",
add_shortcut_to_tip=True)
configure_action = create_action(
self,
_("&Configuration per file..."),
icon=ima.icon('run_settings'),
tip=_("Run settings"),
menurole=QAction.NoRole,
triggered=self.edit_run_configurations)
self.register_shortcut(configure_action, context="_",
name="Configure", add_shortcut_to_tip=True)
re_run_action = create_action(self, _("Re-run &last script"),
icon=ima.icon('run_again'),
tip=_("Run again last file"),
triggered=self.re_run_file)
self.register_shortcut(re_run_action, context="_",
name="Re-run last script",
add_shortcut_to_tip=True)
run_selected_action = create_action(self, _("Run &selection or "
"current line"),
icon=ima.icon('run_selection'),
tip=_("Run selection or "
"current line"),
triggered=self.run_selection,
context=Qt.WidgetShortcut)
self.register_shortcut(run_selected_action, context="Editor",
name="Run selection", add_shortcut_to_tip=True)
run_cell_action = create_action(self,
_("Run cell"),
icon=ima.icon('run_cell'),
shortcut=CONF.get_shortcut('editor', 'run cell'),
tip=_("Run current cell \n"
"[Use #%% to create cells]"),
triggered=self.run_cell,
context=Qt.WidgetShortcut)
run_cell_advance_action = create_action(
self,
_("Run cell and advance"),
icon=ima.icon('run_cell_advance'),
shortcut=CONF.get_shortcut('editor', 'run cell and advance'),
tip=_("Run current cell and go to the next one "),
triggered=self.run_cell_and_advance,
context=Qt.WidgetShortcut)
debug_cell_action = create_action(
self,
_("Debug cell"),
icon=ima.icon('debug_cell'),
shortcut=CONF.get_shortcut('editor', 'debug cell'),
tip=_("Debug current cell "
"(Alt+Shift+Enter)"),
triggered=self.debug_cell,
context=Qt.WidgetShortcut)
re_run_last_cell_action = create_action(self,
_("Re-run last cell"),
tip=_("Re run last cell "),
triggered=self.re_run_last_cell,
context=Qt.WidgetShortcut)
self.register_shortcut(re_run_last_cell_action,
context="Editor",
name='re-run last cell',
add_shortcut_to_tip=True)
# --- Source code Toolbar ---
self.todo_list_action = create_action(self,
_("Show todo list"), icon=ima.icon('todo_list'),
tip=_("Show comments list (TODO/FIXME/XXX/HINT/TIP/@todo/"
"HACK/BUG/OPTIMIZE/!!!/???)"),
triggered=self.go_to_next_todo)
self.todo_menu = QMenu(self)
self.todo_menu.setStyleSheet("QMenu {menu-scrollable: 1;}")
self.todo_list_action.setMenu(self.todo_menu)
self.todo_menu.aboutToShow.connect(self.update_todo_menu)
self.warning_list_action = create_action(self,
_("Show warning/error list"), icon=ima.icon('wng_list'),
tip=_("Show code analysis warnings/errors"),
triggered=self.go_to_next_warning)
self.warning_menu = QMenu(self)
self.warning_menu.setStyleSheet("QMenu {menu-scrollable: 1;}")
self.warning_list_action.setMenu(self.warning_menu)
self.warning_menu.aboutToShow.connect(self.update_warning_menu)
self.previous_warning_action = create_action(self,
_("Previous warning/error"), icon=ima.icon('prev_wng'),
tip=_("Go to previous code analysis warning/error"),
triggered=self.go_to_previous_warning,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_warning_action,
context="Editor",
name="Previous warning",
add_shortcut_to_tip=True)
self.next_warning_action = create_action(self,
_("Next warning/error"), icon=ima.icon('next_wng'),
tip=_("Go to next code analysis warning/error"),
triggered=self.go_to_next_warning,
context=Qt.WidgetShortcut)
self.register_shortcut(self.next_warning_action,
context="Editor",
name="Next warning",
add_shortcut_to_tip=True)
self.previous_edit_cursor_action = create_action(self,
_("Last edit location"), icon=ima.icon('last_edit_location'),
tip=_("Go to last edit location"),
triggered=self.go_to_last_edit_location,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_edit_cursor_action,
context="Editor",
name="Last edit location",
add_shortcut_to_tip=True)
self.previous_cursor_action = create_action(self,
_("Previous cursor position"), icon=ima.icon('prev_cursor'),
tip=_("Go to previous cursor position"),
triggered=self.go_to_previous_cursor_position,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_cursor_action,
context="Editor",
name="Previous cursor position",
add_shortcut_to_tip=True)
self.next_cursor_action = create_action(self,
_("Next cursor position"), icon=ima.icon('next_cursor'),
tip=_("Go to next cursor position"),
triggered=self.go_to_next_cursor_position,
context=Qt.WidgetShortcut)
self.register_shortcut(self.next_cursor_action,
context="Editor",
name="Next cursor position",
add_shortcut_to_tip=True)
# --- Edit Toolbar ---
self.toggle_comment_action = create_action(self,
_("Comment")+"/"+_("Uncomment"), icon=ima.icon('comment'),
tip=_("Comment current line or selection"),
triggered=self.toggle_comment, context=Qt.WidgetShortcut)
self.register_shortcut(self.toggle_comment_action, context="Editor",
name="Toggle comment")
blockcomment_action = create_action(self, _("Add &block comment"),
tip=_("Add block comment around "
"current line or selection"),
triggered=self.blockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(blockcomment_action, context="Editor",
name="Blockcomment")
unblockcomment_action = create_action(self,
_("R&emove block comment"),
tip = _("Remove comment block around "
"current line or selection"),
triggered=self.unblockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(unblockcomment_action, context="Editor",
name="Unblockcomment")
# ----------------------------------------------------------------------
# The following action shortcuts are hard-coded in CodeEditor
# keyPressEvent handler (the shortcut is here only to inform user):
# (context=Qt.WidgetShortcut -> disable shortcut for other widgets)
self.indent_action = create_action(self,
_("Indent"), "Tab", icon=ima.icon('indent'),
tip=_("Indent current line or selection"),
triggered=self.indent, context=Qt.WidgetShortcut)
self.unindent_action = create_action(self,
_("Unindent"), "Shift+Tab", icon=ima.icon('unindent'),
tip=_("Unindent current line or selection"),
triggered=self.unindent, context=Qt.WidgetShortcut)
self.text_uppercase_action = create_action(self,
_("Toggle Uppercase"), icon=ima.icon('toggle_uppercase'),
tip=_("Change to uppercase current line or selection"),
triggered=self.text_uppercase, context=Qt.WidgetShortcut)
self.register_shortcut(self.text_uppercase_action, context="Editor",
name="transform to uppercase")
self.text_lowercase_action = create_action(self,
_("Toggle Lowercase"), icon=ima.icon('toggle_lowercase'),
tip=_("Change to lowercase current line or selection"),
triggered=self.text_lowercase, context=Qt.WidgetShortcut)
self.register_shortcut(self.text_lowercase_action, context="Editor",
name="transform to lowercase")
# ----------------------------------------------------------------------
self.win_eol_action = create_action(self,
_("Carriage return and line feed (Windows)"),
toggled=lambda checked: self.toggle_eol_chars('nt', checked))
self.linux_eol_action = create_action(self,
_("Line feed (UNIX)"),
toggled=lambda checked: self.toggle_eol_chars('posix', checked))
self.mac_eol_action = create_action(self,
_("Carriage return (Mac)"),
toggled=lambda checked: self.toggle_eol_chars('mac', checked))
eol_action_group = QActionGroup(self)
eol_actions = (self.win_eol_action, self.linux_eol_action,
self.mac_eol_action)
add_actions(eol_action_group, eol_actions)
eol_menu = QMenu(_("Convert end-of-line characters"), self)
add_actions(eol_menu, eol_actions)
trailingspaces_action = create_action(
self,
_("Remove trailing spaces"),
triggered=self.remove_trailing_spaces)
# Checkable actions
showblanks_action = self._create_checkable_action(
_("Show blank spaces"), 'blank_spaces', 'set_blanks_enabled')
scrollpastend_action = self._create_checkable_action(
_("Scroll past the end"), 'scroll_past_end',
'set_scrollpastend_enabled')
showindentguides_action = self._create_checkable_action(
_("Show indent guides"), 'indent_guides', 'set_indent_guides')
show_classfunc_dropdown_action = self._create_checkable_action(
_("Show selector for classes and functions"),
'show_class_func_dropdown', 'set_classfunc_dropdown_visible')
show_codestyle_warnings_action = self._create_checkable_action(
_("Show code style warnings"), 'pycodestyle',)
show_docstring_warnings_action = self._create_checkable_action(
_("Show docstring style warnings"), 'pydocstyle')
underline_errors = self._create_checkable_action(
_("Underline errors and warnings"),
'underline_errors', 'set_underline_errors_enabled')
self.checkable_actions = {
'blank_spaces': showblanks_action,
'scroll_past_end': scrollpastend_action,
'indent_guides': showindentguides_action,
'show_class_func_dropdown': show_classfunc_dropdown_action,
'pycodestyle': show_codestyle_warnings_action,
'pydocstyle': show_docstring_warnings_action,
'underline_errors': underline_errors}
fixindentation_action = create_action(self, _("Fix indentation"),
tip=_("Replace tab characters by space characters"),
triggered=self.fix_indentation)
gotoline_action = create_action(self, _("Go to line..."),
icon=ima.icon('gotoline'),
triggered=self.go_to_line,
context=Qt.WidgetShortcut)
self.register_shortcut(gotoline_action, context="Editor",
name="Go to line")
workdir_action = create_action(self,
_("Set console working directory"),
icon=ima.icon('DirOpenIcon'),
tip=_("Set current console (and file explorer) working "
"directory to current script directory"),
triggered=self.__set_workdir)
self.max_recent_action = create_action(self,
_("Maximum number of recent files..."),
triggered=self.change_max_recent_files)
self.clear_recent_action = create_action(self,
_("Clear this list"), tip=_("Clear recent files list"),
triggered=self.clear_recent_files)
# Fixes spyder-ide/spyder#6055.
# See: https://bugreports.qt.io/browse/QTBUG-8596
self.tab_navigation_actions = []
if sys.platform == 'darwin':
self.go_to_next_file_action = create_action(
self,
_("Go to next file"),
shortcut=CONF.get_shortcut('editor', 'go to previous file'),
triggered=self.go_to_next_file,
)
self.go_to_previous_file_action = create_action(
self,
_("Go to previous file"),
shortcut=CONF.get_shortcut('editor', 'go to next file'),
triggered=self.go_to_previous_file,
)
self.register_shortcut(
self.go_to_next_file_action,
context="Editor",
name="Go to next file",
)
self.register_shortcut(
self.go_to_previous_file_action,
context="Editor",
name="Go to previous file",
)
self.tab_navigation_actions = [
MENU_SEPARATOR,
self.go_to_previous_file_action,
self.go_to_next_file_action,
]
# ---- File menu/toolbar construction ----
self.recent_file_menu = QMenu(_("Open &recent"), self)
self.recent_file_menu.aboutToShow.connect(self.update_recent_file_menu)
file_menu_actions = [
self.new_action,
MENU_SEPARATOR,
self.open_action,
self.open_last_closed_action,
self.recent_file_menu,
MENU_SEPARATOR,
MENU_SEPARATOR,
self.save_action,
self.save_all_action,
save_as_action,
save_copy_as_action,
self.revert_action,
MENU_SEPARATOR,
print_preview_action,
self.print_action,
MENU_SEPARATOR,
self.close_action,
self.close_all_action,
MENU_SEPARATOR,
]
self.main.file_menu_actions += file_menu_actions
file_toolbar_actions = ([self.new_action, self.open_action,
self.save_action, self.save_all_action] +
self.main.file_toolbar_actions)
self.main.file_toolbar_actions = file_toolbar_actions
# ---- Find menu/toolbar construction ----
self.main.search_menu_actions = [find_action,
find_next_action,
find_previous_action,
replace_action]
self.main.search_toolbar_actions = [find_action,
find_next_action,
replace_action]
# ---- Edit menu/toolbar construction ----
self.edit_menu_actions = [self.toggle_comment_action,
blockcomment_action, unblockcomment_action,
self.indent_action, self.unindent_action,
self.text_uppercase_action,
self.text_lowercase_action]
self.main.edit_menu_actions += [MENU_SEPARATOR] + self.edit_menu_actions
edit_toolbar_actions = [self.toggle_comment_action,
self.unindent_action, self.indent_action]
self.main.edit_toolbar_actions += edit_toolbar_actions
# ---- Search menu/toolbar construction ----
self.main.search_menu_actions += [gotoline_action]
self.main.search_toolbar_actions += [gotoline_action]
# ---- Run menu/toolbar construction ----
run_menu_actions = [run_action, run_cell_action,
run_cell_advance_action,
re_run_last_cell_action, MENU_SEPARATOR,
run_selected_action, re_run_action,
configure_action, MENU_SEPARATOR]
self.main.run_menu_actions += run_menu_actions
run_toolbar_actions = [run_action, run_cell_action,
run_cell_advance_action, run_selected_action,
re_run_action]
self.main.run_toolbar_actions += run_toolbar_actions
# ---- Debug menu/toolbar construction ----
# NOTE: 'list_breakpoints' is used by the breakpoints
# plugin to add its "List breakpoints" action to this
# menu
debug_menu_actions = [
debug_action,
debug_cell_action,
debug_next_action,
debug_step_action,
debug_return_action,
debug_continue_action,
debug_exit_action,
MENU_SEPARATOR,
pdb_ignore_lib,
pdb_execute_events,
set_clear_breakpoint_action,
set_cond_breakpoint_action,
clear_all_breakpoints_action,
'list_breakpoints',
MENU_SEPARATOR,
self.winpdb_action
]
self.main.debug_menu_actions += debug_menu_actions
debug_toolbar_actions = [
debug_action,
debug_next_action,
debug_step_action,
debug_return_action,
debug_continue_action,
debug_exit_action
]
self.main.debug_toolbar_actions += debug_toolbar_actions
# ---- Source menu/toolbar construction ----
source_menu_actions = [
showblanks_action,
scrollpastend_action,
showindentguides_action,
show_classfunc_dropdown_action,
show_codestyle_warnings_action,
show_docstring_warnings_action,
underline_errors,
MENU_SEPARATOR,
self.todo_list_action,
self.warning_list_action,
self.previous_warning_action,
self.next_warning_action,
MENU_SEPARATOR,
self.previous_edit_cursor_action,
self.previous_cursor_action,
self.next_cursor_action,
MENU_SEPARATOR,
eol_menu,
trailingspaces_action,
fixindentation_action
]
self.main.source_menu_actions += source_menu_actions
source_toolbar_actions = [
self.todo_list_action,
self.warning_list_action,
self.previous_warning_action,
self.next_warning_action,
MENU_SEPARATOR,
self.previous_edit_cursor_action,
self.previous_cursor_action,
self.next_cursor_action
]
self.main.source_toolbar_actions += source_toolbar_actions
# ---- Dock widget and file dependent actions ----
self.dock_toolbar_actions = (
file_toolbar_actions +
[MENU_SEPARATOR] +
source_toolbar_actions +
[MENU_SEPARATOR] +
run_toolbar_actions +
[MENU_SEPARATOR] +
debug_toolbar_actions +
[MENU_SEPARATOR] +
edit_toolbar_actions
)
self.pythonfile_dependent_actions = [
run_action,
configure_action,
set_clear_breakpoint_action,
set_cond_breakpoint_action,
debug_action,
debug_cell_action,
run_selected_action,
run_cell_action,
run_cell_advance_action,
re_run_last_cell_action,
blockcomment_action,
unblockcomment_action,
self.winpdb_action
]
self.cythonfile_compatible_actions = [run_action, configure_action]
self.file_dependent_actions = (
self.pythonfile_dependent_actions +
[
self.save_action,
save_as_action,
save_copy_as_action,
print_preview_action,
self.print_action,
self.save_all_action,
gotoline_action,
workdir_action,
self.close_action,
self.close_all_action,
self.toggle_comment_action,
self.revert_action,
self.indent_action,
self.unindent_action
]
)
self.stack_menu_actions = [gotoline_action, workdir_action]
return self.file_dependent_actions
def toggle_pdb_ignore_lib(self, checked):
""""Set pdb_ignore_lib"""
CONF.set('run', 'pdb_ignore_lib', checked)
self.main.ipyconsole.set_pdb_ignore_lib()
def toggle_pdb_execute_events(self, checked):
""""Set pdb_execute_events"""
CONF.set('run', 'pdb_execute_events', checked)
self.main.ipyconsole.set_pdb_execute_events()
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.restore_scrollbar_position.connect(
self.restore_scrollbar_position)
self.main.console.edit_goto.connect(self.load)
self.exec_in_extconsole.connect(self.main.execute_in_external_console)
self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
self.open_dir.connect(self.main.workingdirectory.chdir)
self.set_help(self.main.help)
if self.main.outlineexplorer is not None:
self.set_outlineexplorer(self.main.outlineexplorer)
editorstack = self.get_current_editorstack()
if not editorstack.data:
self.__load_temp_file()
self.add_dockwidget()
# Add modes to switcher
self.switcher_manager = EditorSwitcherManager(
self,
self.main.switcher,
lambda: self.get_current_editor(),
lambda: self.get_current_editorstack(),
section=self.get_plugin_title())
def update_font(self):
"""Update font from Preferences"""
font = self.get_font()
color_scheme = self.get_color_scheme()
for editorstack in self.editorstacks:
editorstack.set_default_font(font, color_scheme)
completion_size = CONF.get('main', 'completion/size')
for finfo in editorstack.data:
comp_widget = finfo.editor.completion_widget
kite_call_to_action = finfo.editor.kite_call_to_action
comp_widget.setup_appearance(completion_size, font)
kite_call_to_action.setFont(font)
def _create_checkable_action(self, text, conf_name, method=''):
"""Helper function to create a checkable action.
Args:
text (str): Text to be displayed in the action.
conf_name (str): configuration setting associated with the
action
            method (str): name of the EditorStack method that will be used
                to apply the change in each editorstack.
"""
        def toggle(checked):
            self.switch_to_plugin()
            self._toggle_checkable_action(checked, method, conf_name)
        action = create_action(self, text, toggled=toggle)
action.blockSignals(True)
if conf_name not in ['pycodestyle', 'pydocstyle']:
action.setChecked(self.get_option(conf_name))
else:
action.setChecked(CONF.get('lsp-server', conf_name))
action.blockSignals(False)
return action
@Slot(bool, str, str)
def _toggle_checkable_action(self, checked, method_name, conf_name):
"""
        Handle the toggle of a checkable action.
Update editorstacks, PyLS and CONF.
Args:
checked (bool): State of the action.
            method_name (str): name of the EditorStack method that will be
                used to apply the change in each editorstack.
conf_name (str): configuration setting associated with the
action.
"""
if method_name:
if self.editorstacks:
for editorstack in self.editorstacks:
try:
method = getattr(editorstack, method_name)
method(checked)
except AttributeError as e:
logger.error(e, exc_info=True)
self.set_option(conf_name, checked)
else:
if conf_name in ('pycodestyle', 'pydocstyle'):
CONF.set('lsp-server', conf_name, checked)
completions = self.main.completions
completions.update_configuration()
#------ Focus tabwidget
def __get_focus_editorstack(self):
fwidget = QApplication.focusWidget()
if isinstance(fwidget, EditorStack):
return fwidget
else:
for editorstack in self.editorstacks:
if editorstack.isAncestorOf(fwidget):
return editorstack
def set_last_focus_editorstack(self, editorwindow, editorstack):
self.last_focus_editorstack[editorwindow] = editorstack
self.last_focus_editorstack[None] = editorstack # very last editorstack
def get_last_focus_editorstack(self, editorwindow=None):
return self.last_focus_editorstack[editorwindow]
def remove_last_focus_editorstack(self, editorstack):
for editorwindow, widget in list(self.last_focus_editorstack.items()):
if widget is editorstack:
self.last_focus_editorstack[editorwindow] = None
def save_focus_editorstack(self):
editorstack = self.__get_focus_editorstack()
if editorstack is not None:
for win in [self]+self.editorwindows:
if win.isAncestorOf(editorstack):
self.set_last_focus_editorstack(win, editorstack)
# ------ Handling editorstacks
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
self.register_widget_shortcuts(editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
self.set_last_focus_editorstack(self, editorstack)
editorstack.set_closable( len(self.editorstacks) > 1 )
if self.outlineexplorer is not None:
editorstack.set_outlineexplorer(self.outlineexplorer.explorer)
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.update_readonly)
editorstack.encoding_changed.connect(
self.encoding_status.update_encoding)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.update_cursor_position)
editorstack.sig_refresh_eol_chars.connect(
self.eol_status.update_eol)
editorstack.current_file_changed.connect(
self.vcs_status.update_vcs)
editorstack.file_saved.connect(
self.vcs_status.update_vcs_state)
editorstack.set_help(self.help)
editorstack.set_io_actions(self.new_action, self.open_action,
self.save_action, self.revert_action)
editorstack.set_tempfile_path(self.TEMPFILE_PATH)
settings = (
('set_todolist_enabled', 'todo_list'),
('set_blanks_enabled', 'blank_spaces'),
('set_underline_errors_enabled', 'underline_errors'),
('set_scrollpastend_enabled', 'scroll_past_end'),
('set_linenumbers_enabled', 'line_numbers'),
('set_edgeline_enabled', 'edge_line'),
('set_edgeline_columns', 'edge_line_columns'),
('set_indent_guides', 'indent_guides'),
('set_focus_to_editor', 'focus_to_editor'),
('set_run_cell_copy', 'run_cell_copy'),
('set_close_parentheses_enabled', 'close_parentheses'),
('set_close_quotes_enabled', 'close_quotes'),
('set_add_colons_enabled', 'add_colons'),
('set_auto_unindent_enabled', 'auto_unindent'),
('set_indent_chars', 'indent_chars'),
('set_tab_stop_width_spaces', 'tab_stop_width_spaces'),
('set_wrap_enabled', 'wrap'),
('set_tabmode_enabled', 'tab_always_indent'),
('set_stripmode_enabled', 'strip_trailing_spaces_on_modify'),
('set_intelligent_backspace_enabled', 'intelligent_backspace'),
('set_automatic_completions_enabled', 'automatic_completions'),
('set_automatic_completions_after_chars',
'automatic_completions_after_chars'),
('set_automatic_completions_after_ms',
'automatic_completions_after_ms'),
('set_completions_hint_enabled', 'completions_hint'),
('set_completions_hint_after_ms',
'completions_hint_after_ms'),
('set_highlight_current_line_enabled', 'highlight_current_line'),
('set_highlight_current_cell_enabled', 'highlight_current_cell'),
('set_occurrence_highlighting_enabled', 'occurrence_highlighting'),
('set_occurrence_highlighting_timeout', 'occurrence_highlighting/timeout'),
('set_checkeolchars_enabled', 'check_eol_chars'),
('set_tabbar_visible', 'show_tab_bar'),
('set_classfunc_dropdown_visible', 'show_class_func_dropdown'),
('set_always_remove_trailing_spaces', 'always_remove_trailing_spaces'),
('set_convert_eol_on_save', 'convert_eol_on_save'),
('set_convert_eol_on_save_to', 'convert_eol_on_save_to'),
)
for method, setting in settings:
getattr(editorstack, method)(self.get_option(setting))
editorstack.set_help_enabled(CONF.get('help', 'connect/editor'))
editorstack.set_hover_hints_enabled(CONF.get('lsp-server',
'enable_hover_hints'))
color_scheme = self.get_color_scheme()
editorstack.set_default_font(self.get_font(), color_scheme)
editorstack.starting_long_process.connect(self.starting_long_process)
editorstack.ending_long_process.connect(self.ending_long_process)
# Redirect signals
editorstack.sig_option_changed.connect(self.sig_option_changed)
editorstack.redirect_stdio.connect(
lambda state: self.redirect_stdio.emit(state))
editorstack.exec_in_extconsole.connect(
lambda text, option:
self.exec_in_extconsole.emit(text, option))
editorstack.run_cell_in_ipyclient.connect(
lambda code, cell_name, filename, run_cell_copy:
self.run_cell_in_ipyclient.emit(code, cell_name, filename,
run_cell_copy))
editorstack.debug_cell_in_ipyclient.connect(
lambda code, cell_name, filename, run_cell_copy:
self.debug_cell_in_ipyclient.emit(code, cell_name, filename,
run_cell_copy))
editorstack.update_plugin_title.connect(
lambda: self.sig_update_plugin_title.emit())
editorstack.editor_focus_changed.connect(self.save_focus_editorstack)
editorstack.editor_focus_changed.connect(self.main.plugin_focus_changed)
editorstack.editor_focus_changed.connect(self.sig_editor_focus_changed)
editorstack.zoom_in.connect(lambda: self.zoom(1))
editorstack.zoom_out.connect(lambda: self.zoom(-1))
editorstack.zoom_reset.connect(lambda: self.zoom(0))
editorstack.sig_open_file.connect(self.report_open_file)
editorstack.sig_new_file.connect(lambda s: self.new(text=s))
editorstack.sig_new_file[()].connect(self.new)
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.opened_files_list_changed.connect(
self.opened_files_list_changed)
editorstack.active_languages_stats.connect(
self.update_active_languages)
editorstack.sig_go_to_definition.connect(
lambda fname, line, col: self.load(
fname, line, start_column=col))
editorstack.sig_perform_completion_request.connect(
self.send_completion_request)
editorstack.todo_results_changed.connect(self.todo_results_changed)
editorstack.update_code_analysis_actions.connect(
self.update_code_analysis_actions)
editorstack.update_code_analysis_actions.connect(
self.update_todo_actions)
editorstack.refresh_file_dependent_actions.connect(
self.refresh_file_dependent_actions)
editorstack.refresh_save_all_action.connect(self.refresh_save_all_action)
editorstack.sig_refresh_eol_chars.connect(self.refresh_eol_chars)
editorstack.sig_breakpoints_saved.connect(self.breakpoints_saved)
editorstack.text_changed_at.connect(self.text_changed_at)
editorstack.current_file_changed.connect(self.current_file_changed)
editorstack.plugin_load.connect(self.load)
editorstack.plugin_load[()].connect(self.load)
editorstack.edit_goto.connect(self.load)
editorstack.sig_save_as.connect(self.save_as)
editorstack.sig_prev_edit_pos.connect(self.go_to_last_edit_location)
editorstack.sig_prev_cursor.connect(self.go_to_previous_cursor_position)
editorstack.sig_next_cursor.connect(self.go_to_next_cursor_position)
editorstack.sig_prev_warning.connect(self.go_to_previous_warning)
editorstack.sig_next_warning.connect(self.go_to_next_warning)
editorstack.sig_save_bookmark.connect(self.save_bookmark)
editorstack.sig_load_bookmark.connect(self.load_bookmark)
editorstack.sig_save_bookmarks.connect(self.save_bookmarks)
# Register editorstack's autosave component with plugin's autosave
# component
self.autosave.register_autosave_for_stack(editorstack.autosave)
def unregister_editorstack(self, editorstack):
"""Removing editorstack only if it's not the last remaining"""
self.remove_last_focus_editorstack(editorstack)
if len(self.editorstacks) > 1:
index = self.editorstacks.index(editorstack)
self.editorstacks.pop(index)
return True
else:
# editorstack was not removed!
return False
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
for finfo in editorstack.data:
self.register_widget_shortcuts(finfo.editor)
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
#------ Handling editor windows
def setup_other_windows(self):
"""Setup toolbars and menus for 'New window' instances"""
self.toolbar_list = ((_("File toolbar"), "file_toolbar",
self.main.file_toolbar_actions),
(_("Search toolbar"), "search_toolbar",
self.main.search_menu_actions),
(_("Source toolbar"), "source_toolbar",
self.main.source_toolbar_actions),
(_("Run toolbar"), "run_toolbar",
self.main.run_toolbar_actions),
(_("Debug toolbar"), "debug_toolbar",
self.main.debug_toolbar_actions),
(_("Edit toolbar"), "edit_toolbar",
self.main.edit_toolbar_actions))
self.menu_list = ((_("&File"), self.main.file_menu_actions),
(_("&Edit"), self.main.edit_menu_actions),
(_("&Search"), self.main.search_menu_actions),
(_("Sour&ce"), self.main.source_menu_actions),
(_("&Run"), self.main.run_menu_actions),
(_("&Tools"), self.main.tools_menu_actions),
(_("&View"), []),
(_("&Help"), self.main.help_menu_actions))
# Create pending new windows:
for layout_settings in self.editorwindows_to_be_created:
win = self.create_new_window()
win.set_layout_settings(layout_settings)
def switch_to_plugin(self):
"""
Reimplemented method to deactivate shortcut when
opening a new window.
"""
if not self.editorwindows:
super(Editor, self).switch_to_plugin()
def create_new_window(self):
oe_options = self.outlineexplorer.explorer.get_options()
window = EditorMainWindow(
self, self.stack_menu_actions, self.toolbar_list, self.menu_list,
outline_explorer_options=oe_options)
window.add_toolbars_to_menu("&View", window.get_toolbars())
window.load_toolbars()
window.resize(self.size())
window.show()
window.editorwidget.editorsplitter.editorstack.new_window = True
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
return window
def register_editorwindow(self, window):
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
self.editorwindows.pop(self.editorwindows.index(window))
#------ Accessors
def get_filenames(self):
return [finfo.filename for finfo in self.editorstacks[0].data]
def get_filename_index(self, filename):
return self.editorstacks[0].has_filename(filename)
def get_current_editorstack(self, editorwindow=None):
if self.editorstacks is not None:
if len(self.editorstacks) == 1:
editorstack = self.editorstacks[0]
else:
editorstack = self.__get_focus_editorstack()
if editorstack is None or editorwindow is not None:
editorstack = self.get_last_focus_editorstack(editorwindow)
if editorstack is None:
editorstack = self.editorstacks[0]
return editorstack
def get_current_editor(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_editor()
def get_current_finfo(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_finfo()
def get_current_filename(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_filename()
def is_file_opened(self, filename=None):
return self.editorstacks[0].is_file_opened(filename)
def set_current_filename(self, filename, editorwindow=None, focus=True):
"""Set focus to *filename* if this file has been opened.
Return the editor instance associated to *filename*.
"""
editorstack = self.get_current_editorstack(editorwindow)
return editorstack.set_current_filename(filename, focus)
def set_path(self):
# TODO: Fix this
for finfo in self.editorstacks[0].data:
finfo.path = self.main.get_spyder_pythonpath()
#if self.introspector:
# self.introspector.change_extra_path(
# self.main.get_spyder_pythonpath())
#------ Refresh methods
def refresh_file_dependent_actions(self):
"""Enable/disable file dependent actions
(only if dockwidget is visible)"""
if self.dockwidget and self.dockwidget.isVisible():
enable = self.get_current_editor() is not None
for action in self.file_dependent_actions:
action.setEnabled(enable)
def refresh_save_all_action(self):
"""Enable 'Save All' if there are files to be saved"""
editorstack = self.get_current_editorstack()
if editorstack:
state = any(finfo.editor.document().isModified() or finfo.newly_created
for finfo in editorstack.data)
self.save_all_action.setEnabled(state)
def update_warning_menu(self):
"""Update warning list menu"""
editor = self.get_current_editor()
check_results = editor.get_current_warnings()
self.warning_menu.clear()
filename = self.get_current_filename()
for message, line_number in check_results:
error = 'syntax' in message
text = message[:1].upper() + message[1:]
icon = ima.icon('error') if error else ima.icon('warning')
slot = lambda _checked, _l=line_number: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.warning_menu.addAction(action)
def update_todo_menu(self):
"""Update todo list menu"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
self.todo_menu.clear()
filename = self.get_current_filename()
for text, line0 in results:
icon = ima.icon('todo')
slot = lambda _checked, _l=line0: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.todo_menu.addAction(action)
self.update_todo_actions()
def todo_results_changed(self):
"""
Synchronize todo results between editorstacks
Refresh todo list navigation buttons
"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
index = editorstack.get_stack_index()
if index != -1:
filename = editorstack.data[index].filename
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_todo_results(filename, results)
self.update_todo_actions()
def refresh_eol_chars(self, os_name):
os_name = to_text_string(os_name)
self.__set_eol_chars = False
if os_name == 'nt':
self.win_eol_action.setChecked(True)
elif os_name == 'posix':
self.linux_eol_action.setChecked(True)
else:
self.mac_eol_action.setChecked(True)
self.__set_eol_chars = True
#------ Slots
def opened_files_list_changed(self):
"""
Opened files list has changed:
--> open/close file action
--> modification ('*' added to title)
--> current edited file has changed
"""
# Refresh Python file dependent actions:
editor = self.get_current_editor()
if editor:
python_enable = editor.is_python()
cython_enable = python_enable or (
programs.is_module_installed('Cython') and editor.is_cython())
for action in self.pythonfile_dependent_actions:
if action in self.cythonfile_compatible_actions:
enable = cython_enable
else:
enable = python_enable
if action is self.winpdb_action:
action.setEnabled(enable and WINPDB_PATH is not None)
else:
action.setEnabled(enable)
self.open_file_update.emit(self.get_current_filename())
def update_code_analysis_actions(self):
"""Update actions in the warnings menu."""
editor = self.get_current_editor()
# To fix an error at startup
if editor is None:
return
results = editor.get_current_warnings()
# Update code analysis actions
state = results is not None and len(results)
for action in (self.warning_list_action, self.previous_warning_action,
self.next_warning_action):
if state is not None:
action.setEnabled(state)
def update_todo_actions(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
state = (self.get_option('todo_list') and
results is not None and len(results))
if state is not None:
self.todo_list_action.setEnabled(state)
def rehighlight_cells(self):
"""Rehighlight cells of current editor"""
editor = self.get_current_editor()
editor.rehighlight_cells()
QApplication.processEvents()
@Slot(set)
def update_active_languages(self, languages):
self.main.completions.update_client_status(languages)
# ------ Bookmarks
def save_bookmarks(self, filename, bookmarks):
"""Receive bookmark changes and save them."""
filename = to_text_string(filename)
bookmarks = to_text_string(bookmarks)
filename = osp.normpath(osp.abspath(filename))
bookmarks = eval(bookmarks)
save_bookmarks(filename, bookmarks)
#------ File I/O
def __load_temp_file(self):
"""Load temporary file from a text file in user home directory"""
if not osp.isfile(self.TEMPFILE_PATH):
# Creating temporary file
default = ['# -*- coding: utf-8 -*-',
'"""', _("Spyder Editor"), '',
_("This is a temporary script file."),
'"""', '', '']
text = os.linesep.join([encoding.to_unicode(qstr)
for qstr in default])
try:
encoding.write(to_text_string(text), self.TEMPFILE_PATH,
'utf-8')
except EnvironmentError:
self.new()
return
self.load(self.TEMPFILE_PATH)
@Slot()
def __set_workdir(self):
"""Set current script directory as working directory"""
fname = self.get_current_filename()
if fname is not None:
directory = osp.dirname(osp.abspath(fname))
self.open_dir.emit(directory)
def __add_recent_file(self, fname):
"""Add to recent file list"""
if fname is None:
return
if fname in self.recent_files:
self.recent_files.remove(fname)
self.recent_files.insert(0, fname)
if len(self.recent_files) > self.get_option('max_recent_files'):
self.recent_files.pop(-1)
def _clone_file_everywhere(self, finfo):
"""Clone file (*src_editor* widget) in all editorstacks
Cloning from the first editorstack in which every single new editor
is created (when loading or creating a new file)"""
for editorstack in self.editorstacks[1:]:
editor = editorstack.clone_editor_from(finfo, set_current=False)
self.register_widget_shortcuts(editor)
@Slot()
@Slot(str)
def new(self, fname=None, editorstack=None, text=None):
"""
Create a new file - Untitled
fname=None --> fname will be 'untitledXX.py' but do not create file
fname=<basestring> --> create file
"""
# If no text is provided, create default content
empty = False
try:
if text is None:
default_content = True
text, enc = encoding.read(self.TEMPLATE_PATH)
enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-',
text)
if enc_match:
enc = enc_match.group(1)
# Initialize template variables
# Windows
username = encoding.to_unicode_from_fs(
os.environ.get('USERNAME', ''))
# Linux, Mac OS X
if not username:
username = encoding.to_unicode_from_fs(
os.environ.get('USER', '-'))
VARS = {
'date': time.ctime(),
'username': username,
}
try:
text = text % VARS
except Exception:
pass
else:
default_content = False
enc = encoding.read(self.TEMPLATE_PATH)[1]
except (IOError, OSError):
text = ''
enc = 'utf-8'
default_content = True
empty = True
create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n)
# Creating editor widget
if editorstack is None:
current_es = self.get_current_editorstack()
else:
current_es = editorstack
created_from_here = fname is None
if created_from_here:
while True:
fname = create_fname(self.untitled_num)
self.untitled_num += 1
if not osp.isfile(fname):
break
basedir = getcwd_or_home()
if self.main.projects.get_active_project() is not None:
basedir = self.main.projects.get_active_project_path()
else:
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
fname = osp.abspath(osp.join(basedir, fname))
else:
# QString when triggered by a Qt signal
fname = osp.abspath(to_text_string(fname))
index = current_es.has_filename(fname)
if index is not None and not current_es.close_file(index):
return
# Creating the editor widget in the first editorstack (the one that
# can't be destroyed), then cloning this editor widget in all other
# editorstacks:
finfo = self.editorstacks[0].new(fname, enc, text, default_content,
empty)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(finfo.filename)
self.register_widget_shortcuts(current_editor)
if not created_from_here:
self.save(force=True)
def edit_template(self):
"""Edit new file template"""
self.load(self.TEMPLATE_PATH)
def update_recent_file_menu(self):
"""Update recent file menu"""
recent_files = []
for fname in self.recent_files:
if osp.isfile(fname):
recent_files.append(fname)
self.recent_file_menu.clear()
if recent_files:
for fname in recent_files:
action = create_action(
self, fname,
icon=ima.get_icon_by_extension_or_type(
fname, scale_factor=1.0),
triggered=self.load)
action.setData(to_qvariant(fname))
self.recent_file_menu.addAction(action)
self.clear_recent_action.setEnabled(len(recent_files) > 0)
add_actions(self.recent_file_menu, (None, self.max_recent_action,
self.clear_recent_action))
@Slot()
def clear_recent_files(self):
"""Clear recent files list"""
self.recent_files = []
@Slot()
def change_max_recent_files(self):
"Change max recent files entries"""
editorstack = self.get_current_editorstack()
mrf, valid = QInputDialog.getInt(editorstack, _('Editor'),
_('Maximum number of recent files'),
self.get_option('max_recent_files'), 1, 35)
if valid:
self.set_option('max_recent_files', mrf)
@Slot()
@Slot(str)
@Slot(str, int, str)
@Slot(str, int, str, object)
def load(self, filenames=None, goto=None, word='',
editorwindow=None, processevents=True, start_column=None,
set_focus=True, add_where='end'):
"""
Load a text file
editorwindow: load in this editorwindow (useful when clicking on
outline explorer with multiple editor windows)
processevents: determines if processEvents() should be called at the
end of this method (set to False to prevent keyboard events from
creeping through to the editor during debugging)
"""
# Switch to editor before trying to load a file
try:
self.switch_to_plugin()
except AttributeError:
pass
editor0 = self.get_current_editor()
if editor0 is not None:
position0 = editor0.get_position('cursor')
filename0 = self.get_current_filename()
else:
position0, filename0 = None, None
if not filenames:
# Recent files action
action = self.sender()
if isinstance(action, QAction):
filenames = from_qvariant(action.data(), to_text_string)
if not filenames:
basedir = getcwd_or_home()
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
self.redirect_stdio.emit(False)
parent_widget = self.get_current_editorstack()
if filename0 is not None:
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(filename0)[1])
else:
selectedfilter = ''
if not running_under_pytest():
filenames, _sf = getopenfilenames(
parent_widget,
_("Open file"), basedir,
self.edit_filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails)
else:
# Use a Qt (i.e. scriptable) dialog for pytest
dialog = QFileDialog(parent_widget, _("Open file"),
options=QFileDialog.DontUseNativeDialog)
if dialog.exec_():
filenames = dialog.selectedFiles()
self.redirect_stdio.emit(True)
if filenames:
filenames = [osp.normpath(fname) for fname in filenames]
else:
return
focus_widget = QApplication.focusWidget()
if self.editorwindows and not self.dockwidget.isVisible():
# We override the editorwindow variable to force a focus on
# the editor window instead of the hidden editor dockwidget.
# See spyder-ide/spyder#5742.
if editorwindow not in self.editorwindows:
editorwindow = self.editorwindows[0]
editorwindow.setFocus()
editorwindow.raise_()
elif (self.dockwidget and not self._ismaximized
and not self.dockwidget.isAncestorOf(focus_widget)
and not isinstance(focus_widget, CodeEditor)):
self.switch_to_plugin()
def _convert(fname):
fname = osp.abspath(encoding.to_unicode_from_fs(fname))
if os.name == 'nt' and len(fname) >= 2 and fname[1] == ':':
fname = fname[0].upper()+fname[1:]
return fname
if hasattr(filenames, 'replaceInStrings'):
# This is a QStringList instance (PyQt API #1), converting to list:
filenames = list(filenames)
if not isinstance(filenames, list):
filenames = [_convert(filenames)]
else:
filenames = [_convert(fname) for fname in list(filenames)]
if isinstance(goto, int):
goto = [goto]
elif goto is not None and len(goto) != len(filenames):
goto = None
for index, filename in enumerate(filenames):
# -- Do not open an already opened file
focus = set_focus and index == 0
current_editor = self.set_current_filename(filename,
editorwindow,
focus=focus)
if current_editor is None:
# -- Not a valid filename:
if not osp.isfile(filename):
continue
# --
current_es = self.get_current_editorstack(editorwindow)
# Creating the editor widget in the first editorstack
# (the one that can't be destroyed), then cloning this
# editor widget in all other editorstacks:
finfo = self.editorstacks[0].load(
filename, set_current=False, add_where=add_where,
processevents=processevents)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(filename,
focus=focus)
current_editor.debugger.load_breakpoints()
current_editor.set_bookmarks(load_bookmarks(filename))
self.register_widget_shortcuts(current_editor)
current_es.analyze_script()
self.__add_recent_file(filename)
if goto is not None: # 'word' is assumed to be None as well
current_editor.go_to_line(goto[index], word=word,
start_column=start_column)
position = current_editor.get_position('cursor')
self.cursor_moved(filename0, position0, filename, position)
current_editor.clearFocus()
current_editor.setFocus()
current_editor.window().raise_()
if processevents:
QApplication.processEvents()
else:
# processevents is false only when calling from debugging
current_editor.sig_debug_stop.emit(goto[index])
current_sw = self.main.ipyconsole.get_current_shellwidget()
current_sw.sig_prompt_ready.connect(
current_editor.sig_debug_stop[()].emit)
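    # Illustrative usage of the ``load`` slot above (not part of the plugin
    # itself): ``self.load('script.py', goto=42)`` opens a hypothetical file
    # and jumps to line 42, while ``self.load([fname1, fname2])`` opens a
    # list of paths; ``goto`` may be a single line number or one per file.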
@Slot()
def print_file(self):
"""Print current file"""
editor = self.get_current_editor()
filename = self.get_current_filename()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_font())
printDialog = QPrintDialog(printer, editor)
if editor.has_selected_text():
printDialog.setOption(QAbstractPrintDialog.PrintSelection, True)
self.redirect_stdio.emit(False)
answer = printDialog.exec_()
self.redirect_stdio.emit(True)
if answer == QDialog.Accepted:
self.starting_long_process(_("Printing..."))
printer.setDocName(filename)
editor.print_(printer)
self.ending_long_process()
@Slot()
def print_preview(self):
"""Print preview for current file"""
from qtpy.QtPrintSupport import QPrintPreviewDialog
editor = self.get_current_editor()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_font())
preview = QPrintPreviewDialog(printer, self)
preview.setWindowFlags(Qt.Window)
preview.paintRequested.connect(lambda printer: editor.print_(printer))
self.redirect_stdio.emit(False)
preview.exec_()
self.redirect_stdio.emit(True)
@Slot()
def close_file(self):
"""Close current file"""
editorstack = self.get_current_editorstack()
editorstack.close_file()
@Slot()
def close_all_files(self):
"""Close all opened scripts"""
self.editorstacks[0].close_all_files()
@Slot()
def save(self, index=None, force=False):
"""Save file"""
editorstack = self.get_current_editorstack()
return editorstack.save(index=index, force=force)
@Slot()
def save_as(self):
"""Save *as* the currently edited file"""
editorstack = self.get_current_editorstack()
if editorstack.save_as():
fname = editorstack.get_current_filename()
self.__add_recent_file(fname)
@Slot()
def save_copy_as(self):
"""Save *copy as* the currently edited file"""
editorstack = self.get_current_editorstack()
editorstack.save_copy_as()
@Slot()
def save_all(self, save_new_files=True):
"""Save all opened files"""
self.get_current_editorstack().save_all(save_new_files=save_new_files)
@Slot()
def revert(self):
"""Revert the currently edited file from disk"""
editorstack = self.get_current_editorstack()
editorstack.revert()
@Slot()
def find(self):
"""Find slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show()
editorstack.find_widget.search_text.setFocus()
@Slot()
def find_next(self):
"""Fnd next slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.find_next()
@Slot()
def find_previous(self):
"""Find previous slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.find_previous()
@Slot()
def replace(self):
"""Replace slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show_replace()
def open_last_closed(self):
""" Reopens the last closed tab."""
editorstack = self.get_current_editorstack()
last_closed_files = editorstack.get_last_closed_files()
if (len(last_closed_files) > 0):
file_to_open = last_closed_files[0]
last_closed_files.remove(file_to_open)
editorstack.set_last_closed_files(last_closed_files)
self.load(file_to_open)
#------ Explorer widget
def close_file_from_name(self, filename):
"""Close file from its name"""
filename = osp.abspath(to_text_string(filename))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
self.editorstacks[0].close_file(index)
def removed(self, filename):
"""File was removed in file explorer widget or in project explorer"""
self.close_file_from_name(filename)
def removed_tree(self, dirname):
"""Directory was removed in project explorer widget"""
dirname = osp.abspath(to_text_string(dirname))
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
self.close_file_from_name(fname)
def renamed(self, source, dest):
"""
Propagate file rename to editor stacks and autosave component.
This function is called when a file is renamed in the file explorer
widget or the project explorer.
"""
filename = osp.abspath(to_text_string(source))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
for editorstack in self.editorstacks:
editorstack.rename_in_data(filename,
new_filename=to_text_string(dest))
self.editorstacks[0].autosave.file_renamed(
filename, to_text_string(dest))
def renamed_tree(self, source, dest):
"""Directory was renamed in file explorer or in project explorer."""
dirname = osp.abspath(to_text_string(source))
tofile = to_text_string(dest)
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
new_filename = fname.replace(dirname, tofile)
self.renamed(source=fname, dest=new_filename)
#------ Source code
@Slot()
def indent(self):
"""Indent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.indent()
@Slot()
def unindent(self):
"""Unindent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unindent()
@Slot()
    def text_uppercase(self):
"""Change current line or selection to uppercase."""
editor = self.get_current_editor()
if editor is not None:
editor.transform_to_uppercase()
@Slot()
def text_lowercase(self):
"""Change current line or selection to lowercase."""
editor = self.get_current_editor()
if editor is not None:
editor.transform_to_lowercase()
@Slot()
def toggle_comment(self):
"""Comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.toggle_comment()
@Slot()
def blockcomment(self):
"""Block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.blockcomment()
@Slot()
def unblockcomment(self):
"""Un-block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unblockcomment()
@Slot()
def go_to_next_todo(self):
self.switch_to_plugin()
editor = self.get_current_editor()
position = editor.go_to_next_todo()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def go_to_next_warning(self):
self.switch_to_plugin()
editor = self.get_current_editor()
position = editor.go_to_next_warning()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def go_to_previous_warning(self):
self.switch_to_plugin()
editor = self.get_current_editor()
position = editor.go_to_previous_warning()
filename = self.get_current_filename()
self.add_cursor_position_to_history(filename, position)
@Slot()
def run_winpdb(self):
"""Run winpdb to debug current file"""
if self.save():
fname = self.get_current_filename()
runconf = get_run_configuration(fname)
if runconf is None:
args = []
wdir = None
else:
args = runconf.get_arguments().split()
wdir = runconf.get_working_directory()
# Handle the case where wdir comes back as an empty string
# when the working directory dialog checkbox is unchecked.
# (subprocess "cwd" default is None, so empty str
# must be changed to None in this case.)
programs.run_program(WINPDB_PATH, [fname] + args, cwd=wdir or None)
def toggle_eol_chars(self, os_name, checked):
if checked:
editor = self.get_current_editor()
if self.__set_eol_chars:
self.switch_to_plugin()
editor.set_eol_chars(sourcecode.get_eol_chars_from_os_name(os_name))
@Slot()
def remove_trailing_spaces(self):
self.switch_to_plugin()
editorstack = self.get_current_editorstack()
editorstack.remove_trailing_spaces()
@Slot()
def fix_indentation(self):
self.switch_to_plugin()
editorstack = self.get_current_editorstack()
editorstack.fix_indentation()
#------ Cursor position history management
def update_cursorpos_actions(self):
self.previous_edit_cursor_action.setEnabled(
self.last_edit_cursor_pos is not None)
self.previous_cursor_action.setEnabled(
self.cursor_pos_index is not None and self.cursor_pos_index > 0)
self.next_cursor_action.setEnabled(self.cursor_pos_index is not None \
and self.cursor_pos_index < len(self.cursor_pos_history)-1)
def add_cursor_position_to_history(self, filename, position, fc=False):
if self.__ignore_cursor_position:
return
for index, (fname, pos) in enumerate(self.cursor_pos_history[:]):
if fname == filename:
if pos == position or pos == 0:
if fc:
self.cursor_pos_history[index] = (filename, position)
self.cursor_pos_index = index
self.update_cursorpos_actions()
return
else:
if self.cursor_pos_index >= index:
self.cursor_pos_index -= 1
self.cursor_pos_history.pop(index)
break
if self.cursor_pos_index is not None:
self.cursor_pos_history = \
self.cursor_pos_history[:self.cursor_pos_index+1]
self.cursor_pos_history.append((filename, position))
self.cursor_pos_index = len(self.cursor_pos_history)-1
self.update_cursorpos_actions()
def cursor_moved(self, filename0, position0, filename1, position1):
"""Cursor was just moved: 'go to'"""
if position0 is not None:
self.add_cursor_position_to_history(filename0, position0)
self.add_cursor_position_to_history(filename1, position1)
def text_changed_at(self, filename, position):
self.last_edit_cursor_pos = (to_text_string(filename), position)
def current_file_changed(self, filename, position):
self.add_cursor_position_to_history(to_text_string(filename), position,
fc=True)
# Hide any open tooltips
current_stack = self.get_current_editorstack()
if current_stack is not None:
current_stack.hide_tooltip()
@Slot()
def go_to_last_edit_location(self):
if self.last_edit_cursor_pos is not None:
filename, position = self.last_edit_cursor_pos
if not osp.isfile(filename):
self.last_edit_cursor_pos = None
return
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
def __move_cursor_position(self, index_move):
"""
Move the cursor position forward or backward in the cursor
position history by the specified index increment.
"""
if self.cursor_pos_index is None:
return
filename, _position = self.cursor_pos_history[self.cursor_pos_index]
self.cursor_pos_history[self.cursor_pos_index] = (
filename, self.get_current_editor().get_position('cursor'))
self.__ignore_cursor_position = True
old_index = self.cursor_pos_index
self.cursor_pos_index = min(len(self.cursor_pos_history) - 1,
max(0, self.cursor_pos_index + index_move))
filename, position = self.cursor_pos_history[self.cursor_pos_index]
filenames = self.get_current_editorstack().get_filenames()
if not osp.isfile(filename) and filename not in filenames:
self.cursor_pos_history.pop(self.cursor_pos_index)
if self.cursor_pos_index <= old_index:
old_index -= 1
self.cursor_pos_index = old_index
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
self.__ignore_cursor_position = False
self.update_cursorpos_actions()
@Slot()
def go_to_previous_cursor_position(self):
self.switch_to_plugin()
self.__move_cursor_position(-1)
@Slot()
def go_to_next_cursor_position(self):
self.switch_to_plugin()
self.__move_cursor_position(1)
@Slot()
def go_to_line(self, line=None):
"""Open 'go to line' dialog"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.go_to_line(line)
@Slot()
def set_or_clear_breakpoint(self):
"""Set/Clear breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_clear_breakpoint()
@Slot()
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_edit_conditional_breakpoint()
@Slot()
def clear_all_breakpoints(self):
"""Clear breakpoints in all files"""
self.switch_to_plugin()
clear_all_breakpoints()
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
for data in editorstack.data:
data.editor.debugger.clear_breakpoints()
self.refresh_plugin()
def clear_breakpoint(self, filename, lineno):
"""Remove a single breakpoint"""
clear_breakpoint(filename, lineno)
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
index = self.is_file_opened(filename)
if index is not None:
editorstack.data[index].editor.debugger.toogle_breakpoint(
lineno)
def debug_command(self, command):
"""Debug actions"""
self.switch_to_plugin()
self.main.ipyconsole.pdb_execute(command, hidden=True, echo_code=False)
focus_widget = self.main.ipyconsole.get_focus_widget()
if focus_widget:
focus_widget.setFocus()
#------ Run Python script
@Slot()
def edit_run_configurations(self):
dialog = RunConfigDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
fname = osp.abspath(self.get_current_filename())
dialog.setup(fname)
if dialog.exec_():
fname = dialog.file_to_run
if fname is not None:
self.load(fname)
self.run_file()
@Slot()
def run_file(self, debug=False):
"""Run script inside current interpreter or in a new one"""
editorstack = self.get_current_editorstack()
if editorstack.save(save_new_files=False):
editor = self.get_current_editor()
fname = osp.abspath(self.get_current_filename())
# Get fname's dirname before we escape the single and double
# quotes. Fixes spyder-ide/spyder#6771.
dirname = osp.dirname(fname)
# Escape single and double quotes in fname and dirname.
# Fixes spyder-ide/spyder#2158.
fname = fname.replace("'", r"\'").replace('"', r'\"')
dirname = dirname.replace("'", r"\'").replace('"', r'\"')
runconf = get_run_configuration(fname)
if runconf is None:
dialog = RunConfigOneDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
dialog.setup(fname)
if CONF.get('run', 'open_at_least_once',
not running_under_pytest()):
# Open Run Config dialog at least once: the first time
# a script is ever run in Spyder, so that the user may
# see it at least once and be conscious that it exists
show_dlg = True
CONF.set('run', 'open_at_least_once', False)
else:
# Open Run Config dialog only
# if ALWAYS_OPEN_FIRST_RUN_OPTION option is enabled
show_dlg = CONF.get('run', ALWAYS_OPEN_FIRST_RUN_OPTION)
if show_dlg and not dialog.exec_():
return
runconf = dialog.get_configuration()
args = runconf.get_arguments()
python_args = runconf.get_python_arguments()
interact = runconf.interact
post_mortem = runconf.post_mortem
current = runconf.current
systerm = runconf.systerm
clear_namespace = runconf.clear_namespace
console_namespace = runconf.console_namespace
if runconf.file_dir:
wdir = dirname
elif runconf.cw_dir:
wdir = ''
elif osp.isdir(runconf.dir):
wdir = runconf.dir
else:
wdir = ''
python = True # Note: in the future, it may be useful to run
# something in a terminal instead of a Python interp.
self.__last_ec_exec = (fname, wdir, args, interact, debug,
python, python_args, current, systerm,
post_mortem, clear_namespace,
console_namespace)
self.re_run_file(save_new_files=False)
if not interact and not debug:
# If external console dockwidget is hidden, it will be
# raised in top-level and so focus will be given to the
# current external shell automatically
# (see SpyderPluginWidget.visibility_changed method)
editor.setFocus()
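    # Quick sketch of the quote escaping performed in ``run_file`` above
    # (illustrative values): a filename such as  it's.py  becomes  it\'s.py
    # and  say "hi".py  becomes  say \"hi\".py , so quotes inside file or
    # directory names cannot break the command string built for execution.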
def set_dialog_size(self, size):
self.dialog_size = size
@Slot()
def debug_file(self):
"""Debug current script"""
self.switch_to_plugin()
current_editor = self.get_current_editor()
if current_editor is not None:
current_editor.sig_debug_start.emit()
self.run_file(debug=True)
@Slot()
def re_run_file(self, save_new_files=True):
"""Re-run last script"""
if self.get_option('save_all_before_run'):
self.save_all(save_new_files=save_new_files)
if self.__last_ec_exec is None:
return
(fname, wdir, args, interact, debug,
python, python_args, current, systerm,
post_mortem, clear_namespace,
console_namespace) = self.__last_ec_exec
if not systerm:
self.run_in_current_ipyclient.emit(fname, wdir, args,
debug, post_mortem,
current, clear_namespace,
console_namespace)
else:
self.main.open_external_console(fname, wdir, args, interact,
debug, python, python_args,
systerm, post_mortem)
@Slot()
def run_selection(self):
"""Run selection or current line in external console"""
editorstack = self.get_current_editorstack()
editorstack.run_selection()
@Slot()
def run_cell(self):
"""Run current cell"""
editorstack = self.get_current_editorstack()
editorstack.run_cell()
@Slot()
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
editorstack = self.get_current_editorstack()
editorstack.run_cell_and_advance()
@Slot()
def debug_cell(self):
        """Debug current cell."""
editorstack = self.get_current_editorstack()
editorstack.debug_cell()
@Slot()
def re_run_last_cell(self):
"""Run last executed cell."""
editorstack = self.get_current_editorstack()
editorstack.re_run_last_cell()
# ------ Code bookmarks
@Slot(int)
def save_bookmark(self, slot_num):
"""Save current line and position as bookmark."""
bookmarks = CONF.get('editor', 'bookmarks')
editorstack = self.get_current_editorstack()
if slot_num in bookmarks:
filename, line_num, column = bookmarks[slot_num]
if osp.isfile(filename):
index = editorstack.has_filename(filename)
if index is not None:
block = (editorstack.tabs.widget(index).document()
.findBlockByNumber(line_num))
block.userData().bookmarks.remove((slot_num, column))
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_bookmark(slot_num)
@Slot(int)
def load_bookmark(self, slot_num):
"""Set cursor to bookmarked file and position."""
bookmarks = CONF.get('editor', 'bookmarks')
if slot_num in bookmarks:
filename, line_num, column = bookmarks[slot_num]
else:
return
if not osp.isfile(filename):
self.last_edit_cursor_pos = None
return
self.load(filename)
editor = self.get_current_editor()
if line_num < editor.document().lineCount():
linelength = len(editor.document()
.findBlockByNumber(line_num).text())
if column <= linelength:
editor.go_to_line(line_num + 1, column)
else:
# Last column
editor.go_to_line(line_num + 1, linelength)
#------ Zoom in/out/reset
def zoom(self, factor):
"""Zoom in/out/reset"""
editor = self.get_current_editorstack().get_current_editor()
if factor == 0:
font = self.get_font()
editor.set_font(font)
else:
font = editor.font()
size = font.pointSize() + factor
if size > 0:
font.setPointSize(size)
editor.set_font(font)
editor.update_tab_stop_width_spaces()
#------ Options
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
if self.editorstacks is not None:
# --- syntax highlight and text rendering settings
color_scheme_n = 'color_scheme_name'
color_scheme_o = self.get_color_scheme()
currentline_n = 'highlight_current_line'
currentline_o = self.get_option(currentline_n)
currentcell_n = 'highlight_current_cell'
currentcell_o = self.get_option(currentcell_n)
occurrence_n = 'occurrence_highlighting'
occurrence_o = self.get_option(occurrence_n)
occurrence_timeout_n = 'occurrence_highlighting/timeout'
occurrence_timeout_o = self.get_option(occurrence_timeout_n)
focus_to_editor_n = 'focus_to_editor'
focus_to_editor_o = self.get_option(focus_to_editor_n)
for editorstack in self.editorstacks:
if color_scheme_n in options:
editorstack.set_color_scheme(color_scheme_o)
if currentline_n in options:
editorstack.set_highlight_current_line_enabled(
currentline_o)
if currentcell_n in options:
editorstack.set_highlight_current_cell_enabled(
currentcell_o)
if occurrence_n in options:
editorstack.set_occurrence_highlighting_enabled(occurrence_o)
if occurrence_timeout_n in options:
editorstack.set_occurrence_highlighting_timeout(
occurrence_timeout_o)
if focus_to_editor_n in options:
editorstack.set_focus_to_editor(focus_to_editor_o)
# --- everything else
tabbar_n = 'show_tab_bar'
tabbar_o = self.get_option(tabbar_n)
classfuncdropdown_n = 'show_class_func_dropdown'
classfuncdropdown_o = self.get_option(classfuncdropdown_n)
linenb_n = 'line_numbers'
linenb_o = self.get_option(linenb_n)
blanks_n = 'blank_spaces'
blanks_o = self.get_option(blanks_n)
scrollpastend_n = 'scroll_past_end'
scrollpastend_o = self.get_option(scrollpastend_n)
edgeline_n = 'edge_line'
edgeline_o = self.get_option(edgeline_n)
edgelinecols_n = 'edge_line_columns'
edgelinecols_o = self.get_option(edgelinecols_n)
wrap_n = 'wrap'
wrap_o = self.get_option(wrap_n)
indentguides_n = 'indent_guides'
indentguides_o = self.get_option(indentguides_n)
tabindent_n = 'tab_always_indent'
tabindent_o = self.get_option(tabindent_n)
stripindent_n = 'strip_trailing_spaces_on_modify'
stripindent_o = self.get_option(stripindent_n)
ibackspace_n = 'intelligent_backspace'
ibackspace_o = self.get_option(ibackspace_n)
autocompletions_n = 'automatic_completions'
autocompletions_o = self.get_option(autocompletions_n)
completionshint_n = 'completions_hint'
completionshint_o = self.get_option(completionshint_n)
removetrail_n = 'always_remove_trailing_spaces'
removetrail_o = self.get_option(removetrail_n)
converteol_n = 'convert_eol_on_save'
converteol_o = self.get_option(converteol_n)
converteolto_n = 'convert_eol_on_save_to'
converteolto_o = self.get_option(converteolto_n)
runcellcopy_n = 'run_cell_copy'
runcellcopy_o = self.get_option(runcellcopy_n)
closepar_n = 'close_parentheses'
closepar_o = self.get_option(closepar_n)
close_quotes_n = 'close_quotes'
close_quotes_o = self.get_option(close_quotes_n)
add_colons_n = 'add_colons'
add_colons_o = self.get_option(add_colons_n)
autounindent_n = 'auto_unindent'
autounindent_o = self.get_option(autounindent_n)
indent_chars_n = 'indent_chars'
indent_chars_o = self.get_option(indent_chars_n)
tab_stop_width_spaces_n = 'tab_stop_width_spaces'
tab_stop_width_spaces_o = self.get_option(tab_stop_width_spaces_n)
help_n = 'connect_to_oi'
help_o = CONF.get('help', 'connect/editor')
todo_n = 'todo_list'
todo_o = self.get_option(todo_n)
finfo = self.get_current_finfo()
for editorstack in self.editorstacks:
# Checkable options
if blanks_n in options:
editorstack.set_blanks_enabled(blanks_o)
if scrollpastend_n in options:
editorstack.set_scrollpastend_enabled(scrollpastend_o)
if indentguides_n in options:
editorstack.set_indent_guides(indentguides_o)
if classfuncdropdown_n in options:
editorstack.set_classfunc_dropdown_visible(
classfuncdropdown_o)
if tabbar_n in options:
editorstack.set_tabbar_visible(tabbar_o)
if linenb_n in options:
editorstack.set_linenumbers_enabled(linenb_o,
current_finfo=finfo)
if autocompletions_n in options:
editorstack.set_automatic_completions_enabled(
autocompletions_o)
if completionshint_n in options:
editorstack.set_completions_hint_enabled(completionshint_o)
if edgeline_n in options:
editorstack.set_edgeline_enabled(edgeline_o)
if edgelinecols_n in options:
editorstack.set_edgeline_columns(edgelinecols_o)
if wrap_n in options:
editorstack.set_wrap_enabled(wrap_o)
if tabindent_n in options:
editorstack.set_tabmode_enabled(tabindent_o)
if stripindent_n in options:
editorstack.set_stripmode_enabled(stripindent_o)
if ibackspace_n in options:
editorstack.set_intelligent_backspace_enabled(ibackspace_o)
if removetrail_n in options:
editorstack.set_always_remove_trailing_spaces(removetrail_o)
if converteol_n in options:
editorstack.set_convert_eol_on_save(converteol_o)
if converteolto_n in options:
editorstack.set_convert_eol_on_save_to(converteolto_o)
if runcellcopy_n in options:
editorstack.set_run_cell_copy(runcellcopy_o)
if closepar_n in options:
editorstack.set_close_parentheses_enabled(closepar_o)
if close_quotes_n in options:
editorstack.set_close_quotes_enabled(close_quotes_o)
if add_colons_n in options:
editorstack.set_add_colons_enabled(add_colons_o)
if autounindent_n in options:
editorstack.set_auto_unindent_enabled(autounindent_o)
if indent_chars_n in options:
editorstack.set_indent_chars(indent_chars_o)
if tab_stop_width_spaces_n in options:
editorstack.set_tab_stop_width_spaces(tab_stop_width_spaces_o)
if help_n in options:
editorstack.set_help_enabled(help_o)
if todo_n in options:
editorstack.set_todolist_enabled(todo_o,
current_finfo=finfo)
for name, action in self.checkable_actions.items():
if name in options:
# Avoid triggering the action when this action changes state
action.blockSignals(True)
state = self.get_option(name)
action.setChecked(state)
action.blockSignals(False)
# See: spyder-ide/spyder#9915
# Multiply by 1000 to convert seconds to milliseconds
self.autosave.interval = (
self.get_option('autosave_interval') * 1000)
self.autosave.enabled = self.get_option('autosave_enabled')
# We must update the current editor after the others:
# (otherwise, code analysis buttons state would correspond to the
# last editor instead of showing the one of the current editor)
if finfo is not None:
# TODO: Connect this to the LSP
if todo_n in options and todo_o:
finfo.run_todo_finder()
# --- Open files
def get_open_filenames(self):
"""Get the list of open files in the current stack"""
editorstack = self.editorstacks[0]
filenames = []
filenames += [finfo.filename for finfo in editorstack.data]
return filenames
def set_open_filenames(self):
"""
Set the recent opened files on editor based on active project.
If no project is active, then editor filenames are saved, otherwise
the opened filenames are stored in the project config info.
"""
if self.projects is not None:
if not self.projects.get_active_project():
filenames = self.get_open_filenames()
self.set_option('filenames', filenames)
def setup_open_files(self):
"""
Open the list of saved files per project.
Also open any files that the user selected in the recovery dialog.
"""
self.set_create_new_file_if_empty(False)
active_project_path = None
if self.projects is not None:
active_project_path = self.projects.get_active_project_path()
if active_project_path:
filenames = self.projects.get_project_filenames()
else:
filenames = self.get_option('filenames', default=[])
self.close_all_files()
all_filenames = self.autosave.recover_files_to_open + filenames
if all_filenames and any([osp.isfile(f) for f in all_filenames]):
layout = self.get_option('layout_settings', None)
# Check if no saved layout settings exist, e.g. clean prefs file.
# If not, load with default focus/layout, to fix
# spyder-ide/spyder#8458.
if layout:
is_vertical, cfname, clines = layout.get('splitsettings')[0]
if cfname in filenames:
index = filenames.index(cfname)
# First we load the last focused file.
self.load(filenames[index], goto=clines[index], set_focus=True)
# Then we load the files located to the left of the last
# focused file in the tabbar, while keeping the focus on
# the last focused file.
if index > 0:
self.load(filenames[index::-1], goto=clines[index::-1],
set_focus=False, add_where='start')
# Then we load the files located to the right of the last
# focused file in the tabbar, while keeping the focus on
# the last focused file.
if index < (len(filenames) - 1):
self.load(filenames[index+1:], goto=clines[index:],
set_focus=False, add_where='end')
# Finally we load any recovered files at the end of the tabbar,
# while keeping focus on the last focused file.
if self.autosave.recover_files_to_open:
self.load(self.autosave.recover_files_to_open,
set_focus=False, add_where='end')
else:
if filenames:
self.load(filenames, goto=clines)
if self.autosave.recover_files_to_open:
self.load(self.autosave.recover_files_to_open)
else:
if filenames:
self.load(filenames)
if self.autosave.recover_files_to_open:
self.load(self.autosave.recover_files_to_open)
if self.__first_open_files_setup:
self.__first_open_files_setup = False
if layout is not None:
self.editorsplitter.set_layout_settings(
layout,
dont_goto=filenames[0])
win_layout = self.get_option('windows_layout_settings', [])
if win_layout:
for layout_settings in win_layout:
self.editorwindows_to_be_created.append(
layout_settings)
self.set_last_focus_editorstack(self, self.editorstacks[0])
else:
self.__load_temp_file()
self.set_create_new_file_if_empty(True)
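    # Worked example for the restore order above (illustrative): with saved
    # filenames [a, b, c, d] and c recorded as the last focused file, c is
    # loaded first with focus, then [c, b, a] are inserted at the start of
    # the tabbar (c is skipped because it is already open), then [d] is
    # appended at the end, and any recovered autosave files are added last.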
def save_open_files(self):
"""Save the list of open files"""
self.set_option('filenames', self.get_open_filenames())
def set_create_new_file_if_empty(self, value):
"""Change the value of create_new_file_if_empty"""
for editorstack in self.editorstacks:
editorstack.create_new_file_if_empty = value
# --- File Menu actions (Mac only)
@Slot()
def go_to_next_file(self):
"""Switch to next file tab on the current editor stack."""
editorstack = self.get_current_editorstack()
editorstack.tabs.tab_navigate(+1)
@Slot()
def go_to_previous_file(self):
"""Switch to previous file tab on the current editor stack."""
editorstack = self.get_current_editorstack()
editorstack.tabs.tab_navigate(-1)
|
[] |
[] |
[
"USER",
"USERNAME"
] |
[]
|
["USER", "USERNAME"]
|
python
| 2 | 0 | |
src/train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
import matplotlib.pyplot as plt
from src.utils import save_pickle
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
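# Sketch of how ErrorHandler is wired up (mirrors multi_main above; the
# variable names are illustrative):
#
#     error_queue = mp.SimpleQueue()
#     error_handler = ErrorHandler(error_queue)
#     proc = mp.Process(target=run, args=(args, device_id, error_queue))
#     proc.start()
#     error_handler.add_child(proc.pid)
#
# A failing worker puts (rank, traceback) on the queue; the listener thread
# re-posts it and sends SIGUSR1 to the parent, whose handler kills the
# children and re-raises the traceback.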
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
print("In here!")
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
print("Cp files", cp_files)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
print("Stats: {}".format(stats.xent()))
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
    # Resolve the target device from the parsed arguments.
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    device_id = 0 if device == "cuda" else -1
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
losses, n_docs = trainer.train(train_iter_fct, args.train_steps)
save_pickle(losses, 'losses_classifier')
save_pickle(n_docs, 'docs_classifier')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline', 'classifierDummy', 'gnn'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
hugolib/testhelpers_test.go
|
package hugolib
import (
"image/jpeg"
"io"
"math/rand"
"path/filepath"
"runtime"
"strconv"
"testing"
"time"
"unicode/utf8"
"github.com/gohugoio/hugo/htesting"
"github.com/gohugoio/hugo/output"
"github.com/gohugoio/hugo/parser/metadecoders"
"github.com/google/go-cmp/cmp"
"github.com/gohugoio/hugo/parser"
"github.com/pkg/errors"
"bytes"
"fmt"
"regexp"
"strings"
"text/template"
"github.com/fsnotify/fsnotify"
"github.com/gohugoio/hugo/common/herrors"
"github.com/gohugoio/hugo/config"
"github.com/gohugoio/hugo/deps"
"github.com/gohugoio/hugo/resources/page"
"github.com/sanity-io/litter"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/tpl"
"github.com/spf13/viper"
"os"
"github.com/gohugoio/hugo/resources/resource"
qt "github.com/frankban/quicktest"
"github.com/gohugoio/hugo/common/loggers"
"github.com/gohugoio/hugo/hugofs"
)
var (
deepEqualsPages = qt.CmpEquals(cmp.Comparer(func(p1, p2 *pageState) bool { return p1 == p2 }))
deepEqualsOutputFormats = qt.CmpEquals(cmp.Comparer(func(o1, o2 output.Format) bool {
return o1.Name == o2.Name && o1.MediaType.Type() == o2.MediaType.Type()
}))
)
type sitesBuilder struct {
Cfg config.Provider
environ []string
Fs *hugofs.Fs
T testing.TB
depsCfg deps.DepsCfg
*qt.C
logger *loggers.Logger
rnd *rand.Rand
dumper litter.Options
// Used to test partial rebuilds.
changedFiles []string
removedFiles []string
// Aka the Hugo server mode.
running bool
H *HugoSites
theme string
// Default toml
configFormat string
configFileSet bool
viperSet bool
// Default is empty.
// TODO(bep) revisit this and consider always setting it to something.
// Consider this in relation to using the BaseFs.PublishFs to all publishing.
workingDir string
addNothing bool
// Base data/content
contentFilePairs []filenameContent
templateFilePairs []filenameContent
i18nFilePairs []filenameContent
dataFilePairs []filenameContent
// Additional data/content.
// As in "use the base, but add these on top".
contentFilePairsAdded []filenameContent
templateFilePairsAdded []filenameContent
i18nFilePairsAdded []filenameContent
dataFilePairsAdded []filenameContent
}
type filenameContent struct {
filename string
content string
}
func newTestSitesBuilder(t testing.TB) *sitesBuilder {
v := viper.New()
fs := hugofs.NewMem(v)
litterOptions := litter.Options{
HidePrivateFields: true,
StripPackageNames: true,
Separator: " ",
}
return &sitesBuilder{T: t, C: qt.New(t), Fs: fs, configFormat: "toml",
dumper: litterOptions, rnd: rand.New(rand.NewSource(time.Now().Unix()))}
}
func newTestSitesBuilderFromDepsCfg(t testing.TB, d deps.DepsCfg) *sitesBuilder {
c := qt.New(t)
litterOptions := litter.Options{
HidePrivateFields: true,
StripPackageNames: true,
Separator: " ",
}
b := &sitesBuilder{T: t, C: c, depsCfg: d, Fs: d.Fs, dumper: litterOptions, rnd: rand.New(rand.NewSource(time.Now().Unix()))}
workingDir := d.Cfg.GetString("workingDir")
b.WithWorkingDir(workingDir)
return b.WithViper(d.Cfg.(*viper.Viper))
}
func (s *sitesBuilder) Running() *sitesBuilder {
s.running = true
return s
}
func (s *sitesBuilder) WithNothingAdded() *sitesBuilder {
s.addNothing = true
return s
}
func (s *sitesBuilder) WithLogger(logger *loggers.Logger) *sitesBuilder {
s.logger = logger
return s
}
func (s *sitesBuilder) WithWorkingDir(dir string) *sitesBuilder {
s.workingDir = filepath.FromSlash(dir)
return s
}
func (s *sitesBuilder) WithEnviron(env ...string) *sitesBuilder {
for i := 0; i < len(env); i += 2 {
s.environ = append(s.environ, fmt.Sprintf("%s=%s", env[i], env[i+1]))
}
return s
}
func (s *sitesBuilder) WithConfigTemplate(data interface{}, format, configTemplate string) *sitesBuilder {
s.T.Helper()
if format == "" {
format = "toml"
}
templ, err := template.New("test").Parse(configTemplate)
if err != nil {
s.Fatalf("Template parse failed: %s", err)
}
var b bytes.Buffer
templ.Execute(&b, data)
return s.WithConfigFile(format, b.String())
}
func (s *sitesBuilder) WithViper(v *viper.Viper) *sitesBuilder {
s.T.Helper()
if s.configFileSet {
s.T.Fatal("WithViper: use Viper or config.toml, not both")
}
defer func() {
s.viperSet = true
}()
// Write to a config file to make sure the tests follow the same code path.
var buff bytes.Buffer
m := v.AllSettings()
s.Assert(parser.InterfaceToConfig(m, metadecoders.TOML, &buff), qt.IsNil)
return s.WithConfigFile("toml", buff.String())
}
func (s *sitesBuilder) WithConfigFile(format, conf string) *sitesBuilder {
s.T.Helper()
if s.viperSet {
s.T.Fatal("WithConfigFile: use Viper or config.toml, not both")
}
s.configFileSet = true
filename := s.absFilename("config." + format)
writeSource(s.T, s.Fs, filename, conf)
s.configFormat = format
return s
}
func (s *sitesBuilder) WithThemeConfigFile(format, conf string) *sitesBuilder {
s.T.Helper()
if s.theme == "" {
s.theme = "test-theme"
}
filename := filepath.Join("themes", s.theme, "config."+format)
writeSource(s.T, s.Fs, s.absFilename(filename), conf)
return s
}
func (s *sitesBuilder) WithSourceFile(filenameContent ...string) *sitesBuilder {
s.T.Helper()
for i := 0; i < len(filenameContent); i += 2 {
writeSource(s.T, s.Fs, s.absFilename(filenameContent[i]), filenameContent[i+1])
}
return s
}
func (s *sitesBuilder) absFilename(filename string) string {
filename = filepath.FromSlash(filename)
if filepath.IsAbs(filename) {
return filename
}
if s.workingDir != "" && !strings.HasPrefix(filename, s.workingDir) {
filename = filepath.Join(s.workingDir, filename)
}
return filename
}
const commonConfigSections = `
[services]
[services.disqus]
shortname = "disqus_shortname"
[services.googleAnalytics]
id = "ga_id"
[privacy]
[privacy.disqus]
disable = false
[privacy.googleAnalytics]
respectDoNotTrack = true
anonymizeIP = true
[privacy.instagram]
simple = true
[privacy.twitter]
enableDNT = true
[privacy.vimeo]
disable = false
[privacy.youtube]
disable = false
privacyEnhanced = true
`
func (s *sitesBuilder) WithSimpleConfigFile() *sitesBuilder {
s.T.Helper()
return s.WithSimpleConfigFileAndBaseURL("http://example.com/")
}
func (s *sitesBuilder) WithSimpleConfigFileAndBaseURL(baseURL string) *sitesBuilder {
s.T.Helper()
config := fmt.Sprintf("baseURL = %q", baseURL)
config = config + commonConfigSections
return s.WithConfigFile("toml", config)
}
func (s *sitesBuilder) WithDefaultMultiSiteConfig() *sitesBuilder {
var defaultMultiSiteConfig = `
baseURL = "http://example.com/blog"
paginate = 1
disablePathToLower = true
defaultContentLanguage = "en"
defaultContentLanguageInSubdir = true
[permalinks]
other = "/somewhere/else/:filename"
[blackfriday]
angledQuotes = true
[Taxonomies]
tag = "tags"
[Languages]
[Languages.en]
weight = 10
title = "In English"
languageName = "English"
[Languages.en.blackfriday]
angledQuotes = false
[[Languages.en.menu.main]]
url = "/"
name = "Home"
weight = 0
[Languages.fr]
weight = 20
title = "Le Français"
languageName = "Français"
[Languages.fr.Taxonomies]
plaque = "plaques"
[Languages.nn]
weight = 30
title = "På nynorsk"
languageName = "Nynorsk"
paginatePath = "side"
[Languages.nn.Taxonomies]
lag = "lag"
[[Languages.nn.menu.main]]
url = "/"
name = "Heim"
weight = 1
[Languages.nb]
weight = 40
title = "På bokmål"
languageName = "Bokmål"
paginatePath = "side"
[Languages.nb.Taxonomies]
lag = "lag"
` + commonConfigSections
return s.WithConfigFile("toml", defaultMultiSiteConfig)
}
func (s *sitesBuilder) WithSunset(in string) {
// Write a real image into one of the bundle above.
src, err := os.Open(filepath.FromSlash("testdata/sunset.jpg"))
s.Assert(err, qt.IsNil)
out, err := s.Fs.Source.Create(filepath.FromSlash(filepath.Join(s.workingDir, in)))
s.Assert(err, qt.IsNil)
_, err = io.Copy(out, src)
s.Assert(err, qt.IsNil)
out.Close()
src.Close()
}
func (s *sitesBuilder) createFilenameContent(pairs []string) []filenameContent {
var slice []filenameContent
s.appendFilenameContent(&slice, pairs...)
return slice
}
func (s *sitesBuilder) appendFilenameContent(slice *[]filenameContent, pairs ...string) {
if len(pairs)%2 != 0 {
panic("file content mismatch")
}
for i := 0; i < len(pairs); i += 2 {
c := filenameContent{
filename: pairs[i],
content: pairs[i+1],
}
*slice = append(*slice, c)
}
}
func (s *sitesBuilder) WithContent(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.contentFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithContentAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.contentFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithTemplates(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.templateFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithTemplatesAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.templateFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithData(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.dataFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithDataAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.dataFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) WithI18n(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.i18nFilePairs, filenameContent...)
return s
}
func (s *sitesBuilder) WithI18nAdded(filenameContent ...string) *sitesBuilder {
s.appendFilenameContent(&s.i18nFilePairsAdded, filenameContent...)
return s
}
func (s *sitesBuilder) EditFiles(filenameContent ...string) *sitesBuilder {
for i := 0; i < len(filenameContent); i += 2 {
filename, content := filepath.FromSlash(filenameContent[i]), filenameContent[i+1]
absFilename := s.absFilename(filename)
s.changedFiles = append(s.changedFiles, absFilename)
writeSource(s.T, s.Fs, absFilename, content)
}
return s
}
func (s *sitesBuilder) RemoveFiles(filenames ...string) *sitesBuilder {
for _, filename := range filenames {
absFilename := s.absFilename(filename)
s.removedFiles = append(s.removedFiles, absFilename)
s.Assert(s.Fs.Source.Remove(absFilename), qt.IsNil)
}
return s
}
func (s *sitesBuilder) writeFilePairs(folder string, files []filenameContent) *sitesBuilder {
// We have had some "filesystem ordering" bugs that we have not discovered in
// our tests running with the in memory filesystem.
// That file system is backed by a map so not sure how this helps, but some
// randomness in tests doesn't hurt.
s.rnd.Shuffle(len(files), func(i, j int) { files[i], files[j] = files[j], files[i] })
for _, fc := range files {
target := folder
// TODO(bep) clean up this magic.
if strings.HasPrefix(fc.filename, folder) {
target = ""
}
if s.workingDir != "" {
target = filepath.Join(s.workingDir, target)
}
writeSource(s.T, s.Fs, filepath.Join(target, fc.filename), fc.content)
}
return s
}
func (s *sitesBuilder) CreateSites() *sitesBuilder {
if err := s.CreateSitesE(); err != nil {
herrors.PrintStackTraceFromErr(err)
s.Fatalf("Failed to create sites: %s", err)
}
return s
}
func (s *sitesBuilder) LoadConfig() error {
if !s.configFileSet {
s.WithSimpleConfigFile()
}
cfg, _, err := LoadConfig(ConfigSourceDescriptor{
WorkingDir: s.workingDir,
Fs: s.Fs.Source,
Logger: s.logger,
Environ: s.environ,
Filename: "config." + s.configFormat}, func(cfg config.Provider) error {
return nil
})
if err != nil {
return err
}
s.Cfg = cfg
return nil
}
func (s *sitesBuilder) CreateSitesE() error {
if !s.addNothing {
if _, ok := s.Fs.Source.(*afero.OsFs); ok {
for _, dir := range []string{
"content/sect",
"layouts/_default",
"layouts/_default/_markup",
"layouts/partials",
"layouts/shortcodes",
"data",
"i18n",
} {
if err := os.MkdirAll(filepath.Join(s.workingDir, dir), 0777); err != nil {
return errors.Wrapf(err, "failed to create %q", dir)
}
}
}
s.addDefaults()
s.writeFilePairs("content", s.contentFilePairsAdded)
s.writeFilePairs("layouts", s.templateFilePairsAdded)
s.writeFilePairs("data", s.dataFilePairsAdded)
s.writeFilePairs("i18n", s.i18nFilePairsAdded)
s.writeFilePairs("i18n", s.i18nFilePairs)
s.writeFilePairs("data", s.dataFilePairs)
s.writeFilePairs("content", s.contentFilePairs)
s.writeFilePairs("layouts", s.templateFilePairs)
}
if err := s.LoadConfig(); err != nil {
return errors.Wrap(err, "failed to load config")
}
s.Fs.Destination = hugofs.NewCreateCountingFs(s.Fs.Destination)
depsCfg := s.depsCfg
depsCfg.Fs = s.Fs
depsCfg.Cfg = s.Cfg
depsCfg.Logger = s.logger
depsCfg.Running = s.running
sites, err := NewHugoSites(depsCfg)
if err != nil {
return errors.Wrap(err, "failed to create sites")
}
s.H = sites
return nil
}
func (s *sitesBuilder) BuildE(cfg BuildCfg) error {
if s.H == nil {
s.CreateSites()
}
return s.H.Build(cfg)
}
func (s *sitesBuilder) Build(cfg BuildCfg) *sitesBuilder {
s.T.Helper()
return s.build(cfg, false)
}
func (s *sitesBuilder) BuildFail(cfg BuildCfg) *sitesBuilder {
s.T.Helper()
return s.build(cfg, true)
}
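// changeEvents converts the recorded changed and removed files into fsnotify events for partial-rebuild tests.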
func (s *sitesBuilder) changeEvents() []fsnotify.Event {
var events []fsnotify.Event
for _, v := range s.changedFiles {
events = append(events, fsnotify.Event{
Name: v,
Op: fsnotify.Write,
})
}
for _, v := range s.removedFiles {
events = append(events, fsnotify.Event{
Name: v,
Op: fsnotify.Remove,
})
}
return events
}
func (s *sitesBuilder) build(cfg BuildCfg, shouldFail bool) *sitesBuilder {
s.Helper()
defer func() {
s.changedFiles = nil
}()
if s.H == nil {
s.CreateSites()
}
err := s.H.Build(cfg, s.changeEvents()...)
if err == nil {
logErrorCount := s.H.NumLogErrors()
if logErrorCount > 0 {
err = fmt.Errorf("logged %d errors", logErrorCount)
}
}
if err != nil && !shouldFail {
herrors.PrintStackTraceFromErr(err)
s.Fatalf("Build failed: %s", err)
} else if err == nil && shouldFail {
s.Fatalf("Expected error")
}
return s
}
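// addDefaults writes a default set of content, template, data and i18n files for any category the test did not provide itself.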
func (s *sitesBuilder) addDefaults() {
var (
contentTemplate = `---
title: doc1
weight: 1
tags:
- tag1
date: "2018-02-28"
---
# doc1
*some "content"*
{{< shortcode >}}
{{< lingo >}}
`
defaultContent = []string{
"content/sect/doc1.en.md", contentTemplate,
"content/sect/doc1.fr.md", contentTemplate,
"content/sect/doc1.nb.md", contentTemplate,
"content/sect/doc1.nn.md", contentTemplate,
}
listTemplateCommon = "{{ $p := .Paginator }}{{ $p.PageNumber }}|{{ .Title }}|{{ i18n \"hello\" }}|{{ .Permalink }}|Pager: {{ template \"_internal/pagination.html\" . }}|Kind: {{ .Kind }}|Content: {{ .Content }}|Len Pages: {{ len .Pages }}|Len RegularPages: {{ len .RegularPages }}| HasParent: {{ if .Parent }}YES{{ else }}NO{{ end }}"
defaultTemplates = []string{
"_default/single.html", "Single: {{ .Title }}|{{ i18n \"hello\" }}|{{.Language.Lang}}|RelPermalink: {{ .RelPermalink }}|Permalink: {{ .Permalink }}|{{ .Content }}|Resources: {{ range .Resources }}{{ .MediaType }}: {{ .RelPermalink}} -- {{ end }}|Summary: {{ .Summary }}|Truncated: {{ .Truncated }}|Parent: {{ .Parent.Title }}",
"_default/list.html", "List Page " + listTemplateCommon,
"index.html", "{{ $p := .Paginator }}Default Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}|String Resource: {{ ( \"Hugo Pipes\" | resources.FromString \"text/pipes.txt\").RelPermalink }}",
"index.fr.html", "{{ $p := .Paginator }}French Home Page {{ $p.PageNumber }}: {{ .Title }}|{{ .IsHome }}|{{ i18n \"hello\" }}|{{ .Permalink }}|{{ .Site.Data.hugo.slogan }}|String Resource: {{ ( \"Hugo Pipes\" | resources.FromString \"text/pipes.txt\").RelPermalink }}",
"_default/terms.html", "Taxonomy Term Page " + listTemplateCommon,
"_default/taxonomy.html", "Taxonomy List Page " + listTemplateCommon,
// Shortcodes
"shortcodes/shortcode.html", "Shortcode: {{ i18n \"hello\" }}",
// A shortcode in multiple languages
"shortcodes/lingo.html", "LingoDefault",
"shortcodes/lingo.fr.html", "LingoFrench",
// Special templates
"404.html", "404|{{ .Lang }}|{{ .Title }}",
"robots.txt", "robots|{{ .Lang }}|{{ .Title }}",
}
defaultI18n = []string{
"en.yaml", `
hello:
other: "Hello"
`,
"fr.yaml", `
hello:
other: "Bonjour"
`,
}
defaultData = []string{
"hugo.toml", "slogan = \"Hugo Rocks!\"",
}
)
if len(s.contentFilePairs) == 0 {
s.writeFilePairs("content", s.createFilenameContent(defaultContent))
}
if len(s.templateFilePairs) == 0 {
s.writeFilePairs("layouts", s.createFilenameContent(defaultTemplates))
}
if len(s.dataFilePairs) == 0 {
s.writeFilePairs("data", s.createFilenameContent(defaultData))
}
if len(s.i18nFilePairs) == 0 {
s.writeFilePairs("i18n", s.createFilenameContent(defaultI18n))
}
}
func (s *sitesBuilder) Fatalf(format string, args ...interface{}) {
s.T.Helper()
s.T.Fatalf(format, args...)
}
func (s *sitesBuilder) AssertFileContentFn(filename string, f func(s string) bool) {
s.T.Helper()
content := s.FileContent(filename)
if !f(content) {
s.Fatalf("Assert failed for %q in content\n%s", filename, content)
}
}
func (s *sitesBuilder) AssertHome(matches ...string) {
s.AssertFileContent("public/index.html", matches...)
}
func (s *sitesBuilder) AssertFileContent(filename string, matches ...string) {
s.T.Helper()
content := s.FileContent(filename)
for _, m := range matches {
lines := strings.Split(m, "\n")
for _, match := range lines {
match = strings.TrimSpace(match)
if match == "" {
continue
}
if !strings.Contains(content, match) {
s.Fatalf("No match for %q in content for %s\n%s\n%q", match, filename, content, content)
}
}
}
}
func (s *sitesBuilder) AssertImage(width, height int, filename string) {
filename = filepath.Join(s.workingDir, filename)
f, err := s.Fs.Destination.Open(filename)
s.Assert(err, qt.IsNil)
defer f.Close()
cfg, err := jpeg.DecodeConfig(f)
s.Assert(err, qt.IsNil)
s.Assert(cfg.Width, qt.Equals, width)
s.Assert(cfg.Height, qt.Equals, height)
}
func (s *sitesBuilder) AssertNoDuplicateWrites() {
s.Helper()
d := s.Fs.Destination.(hugofs.DuplicatesReporter)
s.Assert(d.ReportDuplicates(), qt.Equals, "")
}
func (s *sitesBuilder) FileContent(filename string) string {
s.T.Helper()
filename = filepath.FromSlash(filename)
if !strings.HasPrefix(filename, s.workingDir) {
filename = filepath.Join(s.workingDir, filename)
}
return readDestination(s.T, s.Fs, filename)
}
func (s *sitesBuilder) AssertObject(expected string, object interface{}) {
s.T.Helper()
got := s.dumper.Sdump(object)
expected = strings.TrimSpace(expected)
if expected != got {
fmt.Println(got)
diff := htesting.DiffStrings(expected, got)
s.Fatalf("diff:\n%s\nexpected\n%s\ngot\n%s", diff, expected, got)
}
}
func (s *sitesBuilder) AssertFileContentRe(filename string, matches ...string) {
content := readDestination(s.T, s.Fs, filename)
for _, match := range matches {
r := regexp.MustCompile("(?s)" + match)
if !r.MatchString(content) {
s.Fatalf("No match for %q in content for %s\n%q", match, filename, content)
}
}
}
func (s *sitesBuilder) CheckExists(filename string) bool {
return destinationExists(s.Fs, filepath.Clean(filename))
}
func (s *sitesBuilder) GetPage(ref string) page.Page {
p, err := s.H.Sites[0].getPageNew(nil, ref)
s.Assert(err, qt.IsNil)
return p
}
func (s *sitesBuilder) GetPageRel(p page.Page, ref string) page.Page {
p, err := s.H.Sites[0].getPageNew(p, ref)
s.Assert(err, qt.IsNil)
return p
}
func newTestHelper(cfg config.Provider, fs *hugofs.Fs, t testing.TB) testHelper {
return testHelper{
Cfg: cfg,
Fs: fs,
C: qt.New(t),
}
}
type testHelper struct {
Cfg config.Provider
Fs *hugofs.Fs
*qt.C
}
func (th testHelper) assertFileContent(filename string, matches ...string) {
th.Helper()
filename = th.replaceDefaultContentLanguageValue(filename)
content := readDestination(th, th.Fs, filename)
for _, match := range matches {
match = th.replaceDefaultContentLanguageValue(match)
th.Assert(strings.Contains(content, match), qt.Equals, true, qt.Commentf(match+" not in: \n"+content))
}
}
func (th testHelper) assertFileContentRegexp(filename string, matches ...string) {
filename = th.replaceDefaultContentLanguageValue(filename)
content := readDestination(th, th.Fs, filename)
for _, match := range matches {
match = th.replaceDefaultContentLanguageValue(match)
r := regexp.MustCompile(match)
matches := r.MatchString(content)
if !matches {
fmt.Println(match+":\n", content)
}
th.Assert(matches, qt.Equals, true)
}
}
func (th testHelper) assertFileNotExist(filename string) {
exists, err := helpers.Exists(filename, th.Fs.Destination)
th.Assert(err, qt.IsNil)
th.Assert(exists, qt.Equals, false)
}
func (th testHelper) replaceDefaultContentLanguageValue(value string) string {
defaultInSubDir := th.Cfg.GetBool("defaultContentLanguageInSubDir")
replace := th.Cfg.GetString("defaultContentLanguage") + "/"
if !defaultInSubDir {
value = strings.Replace(value, replace, "", 1)
}
return value
}
func loadTestConfig(fs afero.Fs, withConfig ...func(cfg config.Provider) error) (*viper.Viper, error) {
v, _, err := LoadConfig(ConfigSourceDescriptor{Fs: fs}, withConfig...)
return v, err
}
func newTestCfgBasic() (*viper.Viper, *hugofs.Fs) {
mm := afero.NewMemMapFs()
v := viper.New()
v.Set("defaultContentLanguageInSubdir", true)
fs := hugofs.NewFrom(hugofs.NewBaseFileDecorator(mm), v)
return v, fs
}
func newTestCfg(withConfig ...func(cfg config.Provider) error) (*viper.Viper, *hugofs.Fs) {
mm := afero.NewMemMapFs()
v, err := loadTestConfig(mm, func(cfg config.Provider) error {
// Default is false, but true is easier to use as default in tests
cfg.Set("defaultContentLanguageInSubdir", true)
for _, w := range withConfig {
w(cfg)
}
return nil
})
if err != nil && err != ErrNoConfigFile {
panic(err)
}
fs := hugofs.NewFrom(hugofs.NewBaseFileDecorator(mm), v)
return v, fs
}
func newTestSitesFromConfig(t testing.TB, afs afero.Fs, tomlConfig string, layoutPathContentPairs ...string) (testHelper, *HugoSites) {
if len(layoutPathContentPairs)%2 != 0 {
t.Fatalf("Layouts must be provided in pairs")
}
c := qt.New(t)
writeToFs(t, afs, filepath.Join("content", ".gitkeep"), "")
writeToFs(t, afs, "config.toml", tomlConfig)
cfg, err := LoadConfigDefault(afs)
c.Assert(err, qt.IsNil)
fs := hugofs.NewFrom(afs, cfg)
th := newTestHelper(cfg, fs, t)
for i := 0; i < len(layoutPathContentPairs); i += 2 {
writeSource(t, fs, layoutPathContentPairs[i], layoutPathContentPairs[i+1])
}
h, err := NewHugoSites(deps.DepsCfg{Fs: fs, Cfg: cfg})
c.Assert(err, qt.IsNil)
return th, h
}
func createWithTemplateFromNameValues(additionalTemplates ...string) func(templ tpl.TemplateManager) error {
return func(templ tpl.TemplateManager) error {
for i := 0; i < len(additionalTemplates); i += 2 {
err := templ.AddTemplate(additionalTemplates[i], additionalTemplates[i+1])
if err != nil {
return err
}
}
return nil
}
}
// TODO(bep) replace these with the builder
func buildSingleSite(t testing.TB, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site {
t.Helper()
return buildSingleSiteExpected(t, false, false, depsCfg, buildCfg)
}
func buildSingleSiteExpected(t testing.TB, expectSiteInitError, expectBuildError bool, depsCfg deps.DepsCfg, buildCfg BuildCfg) *Site {
t.Helper()
b := newTestSitesBuilderFromDepsCfg(t, depsCfg).WithNothingAdded()
err := b.CreateSitesE()
if expectSiteInitError {
b.Assert(err, qt.Not(qt.IsNil))
return nil
} else {
b.Assert(err, qt.IsNil)
}
h := b.H
b.Assert(len(h.Sites), qt.Equals, 1)
if expectBuildError {
b.Assert(h.Build(buildCfg), qt.Not(qt.IsNil))
return nil
}
b.Assert(h.Build(buildCfg), qt.IsNil)
return h.Sites[0]
}
func writeSourcesToSource(t *testing.T, base string, fs *hugofs.Fs, sources ...[2]string) {
for _, src := range sources {
writeSource(t, fs, filepath.Join(base, src[0]), src[1])
}
}
func getPage(in page.Page, ref string) page.Page {
p, err := in.GetPage(ref)
if err != nil {
panic(err)
}
return p
}
func content(c resource.ContentProvider) string {
cc, err := c.Content()
if err != nil {
panic(err)
}
ccs, err := cast.ToStringE(cc)
if err != nil {
panic(err)
}
return ccs
}
func dumpPages(pages ...page.Page) {
fmt.Println("---------")
for _, p := range pages {
var meta interface{}
if p.File() != nil && p.File().FileInfo() != nil {
meta = p.File().FileInfo().Meta()
}
fmt.Printf("Kind: %s Title: %-10s RelPermalink: %-10s Path: %-10s sections: %s Lang: %s Meta: %v\n",
p.Kind(), p.Title(), p.RelPermalink(), p.Path(), p.SectionsPath(), p.Lang(), meta)
}
}
func dumpSPages(pages ...*pageState) {
for i, p := range pages {
fmt.Printf("%d: Kind: %s Title: %-10s RelPermalink: %-10s Path: %-10s sections: %s\n",
i+1,
p.Kind(), p.Title(), p.RelPermalink(), p.Path(), p.SectionsPath())
}
}
func printStringIndexes(s string) {
lines := strings.Split(s, "\n")
i := 0
for _, line := range lines {
for _, r := range line {
fmt.Printf("%-3s", strconv.Itoa(i))
i += utf8.RuneLen(r)
}
i++
fmt.Println()
for _, r := range line {
fmt.Printf("%-3s", string(r))
}
fmt.Println()
}
}
func isCI() bool {
return os.Getenv("CI") != ""
}
// See https://github.com/golang/go/issues/19280
// Not in use.
var parallelEnabled = true
func parallel(t *testing.T) {
if parallelEnabled {
t.Parallel()
}
}
func skipSymlink(t *testing.T) {
if runtime.GOOS == "windows" && os.Getenv("CI") == "" {
t.Skip("skip symlink test on local Windows (needs admin)")
}
}
|
[
"\"CI\"",
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
tests/utils/itutils.go
|
package utils
import (
"bytes"
"crypto/tls"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"text/template"
"time"
"github.com/ThomasRooney/gexpect"
)
// Deis points to the CLI used to run tests.
var Deis = "deis "
// DeisTestConfig allows tests to be repeated against different
// targets, with different example apps, using specific credentials, and so on.
type DeisTestConfig struct {
AuthKey string
Hosts string
Domain string
SSHKey string
ClusterName string
UserName string
Password string
Email string
ExampleApp string
AppDomain string
AppName string
ProcessNum string
ImageID string
Version string
AppUser string
SSLCertificatePath string
SSLKeyPath string
}
// randomApp is used for the test run if DEIS_TEST_APP isn't set
var randomApp = GetRandomApp()
// GetGlobalConfig returns a test configuration object.
func GetGlobalConfig() *DeisTestConfig {
authKey := os.Getenv("DEIS_TEST_AUTH_KEY")
if authKey == "" {
authKey = "deis"
}
hosts := os.Getenv("DEIS_TEST_HOSTS")
if hosts == "" {
hosts = "172.17.8.100"
}
domain := os.Getenv("DEIS_TEST_DOMAIN")
if domain == "" {
domain = "local.deisapp.com"
}
sshKey := os.Getenv("DEIS_TEST_SSH_KEY")
if sshKey == "" {
sshKey = "~/.vagrant.d/insecure_private_key"
}
exampleApp := os.Getenv("DEIS_TEST_APP")
if exampleApp == "" {
exampleApp = randomApp
}
appDomain := os.Getenv("DEIS_TEST_APP_DOMAIN")
if appDomain == "" {
appDomain = fmt.Sprintf("test.%s", domain)
}
// generate a self-signed certificate for the app domain
keyOut, err := filepath.Abs(appDomain + ".key")
if err != nil {
log.Fatal(err)
}
certOut, err := filepath.Abs(appDomain + ".cert")
if err != nil {
log.Fatal(err)
}
cmd := exec.Command("openssl", "req", "-new", "-newkey", "rsa:4096", "-nodes", "-x509",
"-days", "1",
"-subj", fmt.Sprintf("/C=US/ST=Colorado/L=Boulder/CN=%s", appDomain),
"-keyout", keyOut,
"-out", certOut)
if err := cmd.Start(); err != nil {
log.Fatal(err)
}
if err := cmd.Wait(); err != nil {
log.Fatal(err)
}
var envCfg = DeisTestConfig{
AuthKey: authKey,
Hosts: hosts,
Domain: domain,
SSHKey: sshKey,
ClusterName: "dev",
UserName: "test",
Password: "asdf1234",
Email: "[email protected]",
ExampleApp: exampleApp,
AppDomain: appDomain,
AppName: "sample",
ProcessNum: "2",
ImageID: "buildtest",
Version: "2",
AppUser: "test1",
SSLCertificatePath: certOut,
SSLKeyPath: keyOut,
}
return &envCfg
}
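// doCurl fetches the URL with TLS verification disabled (self-signed certs) and checks that the body looks like a running example app.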
func doCurl(url string) ([]byte, error) {
// disable security check for self-signed certificates
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
response, err := client.Get(url)
if err != nil {
return nil, err
}
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if !strings.Contains(string(body), "Powered by") {
return nil, fmt.Errorf("App not started (%d)\nBody: (%s)", response.StatusCode, string(body))
}
return body, nil
}
// Curl connects to an endpoint to see if the endpoint is responding.
func Curl(t *testing.T, url string) {
CurlWithFail(t, url, false, "")
}
// CurlApp is a convenience function to see if the example app is running.
func CurlApp(t *testing.T, cfg DeisTestConfig) {
CurlWithFail(t, fmt.Sprintf("http://%s.%s", cfg.AppName, cfg.Domain), false, "")
}
// CurlWithFail connects to a Deis endpoint to see if the example app is running.
func CurlWithFail(t *testing.T, url string, failFlag bool, expect string) {
// FIXME: try the curl a few times
for i := 0; i < 20; i++ {
body, err := doCurl(url)
if err == nil {
fmt.Println(string(body))
return
}
time.Sleep(1 * time.Second)
}
// once more to fail with an error
body, err := doCurl(url)
switch failFlag {
case true:
if err != nil {
if strings.Contains(string(err.Error()), expect) {
fmt.Println("(Error expected...ok) " + expect)
} else {
t.Fatal(err)
}
} else {
if strings.Contains(string(body), expect) {
fmt.Println("(Error expected...ok) " + expect)
} else {
t.Fatal(err)
}
}
case false:
if err != nil {
t.Fatal(err)
} else {
fmt.Println(string(body))
}
}
}
// AuthPasswd tests whether `deis auth:passwd` updates a user's password.
func AuthPasswd(t *testing.T, params *DeisTestConfig, password string) {
fmt.Println("deis auth:passwd")
child, err := gexpect.Spawn(Deis + " auth:passwd")
if err != nil {
t.Fatalf("command not started\n%v", err)
}
fmt.Println("current password:")
err = child.Expect("current password: ")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(params.Password)
fmt.Println("new password:")
err = child.Expect("new password: ")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(password)
fmt.Println("new password (confirm):")
err = child.Expect("new password (confirm): ")
if err != nil {
t.Fatalf("expect password failed\n%v", err)
}
child.SendLine(password)
err = child.Expect("Password change succeeded")
if err != nil {
t.Fatalf("command executiuon failed\n%v", err)
}
child.Close()
}
// CheckList executes a command and optionally tests whether its output does
// or does not contain a given string.
func CheckList(
t *testing.T, cmd string, params interface{}, contain string, notflag bool) {
var cmdBuf bytes.Buffer
tmpl := template.Must(template.New("cmd").Parse(cmd))
if err := tmpl.Execute(&cmdBuf, params); err != nil {
t.Fatal(err)
}
cmdString := cmdBuf.String()
fmt.Println(cmdString)
var cmdl *exec.Cmd
if strings.Contains(cmd, "cat") {
cmdl = exec.Command("sh", "-c", cmdString)
} else {
cmdl = exec.Command("sh", "-c", Deis+cmdString)
}
stdout, _, err := RunCommandWithStdoutStderr(cmdl)
if err != nil {
t.Fatal(err)
}
if notflag && strings.Contains(stdout.String(), contain) {
t.Fatalf("Didn't expect '%s' in command output:\n%v", contain, stdout)
}
if !notflag && !strings.Contains(stdout.String(), contain) {
t.Fatalf("Expected '%s' in command output:\n%v", contain, stdout)
}
}
// Execute takes command string and parameters required to execute the command,
// a failflag to check whether the command is expected to fail, and an expect
// string to check whether the command has failed according to failflag.
//
// If failflag is true and the command failed, check the stdout and stderr for
// the expect string.
func Execute(t *testing.T, cmd string, params interface{}, failFlag bool, expect string) {
var cmdBuf bytes.Buffer
tmpl := template.Must(template.New("cmd").Parse(cmd))
if err := tmpl.Execute(&cmdBuf, params); err != nil {
t.Fatal(err)
}
cmdString := cmdBuf.String()
fmt.Println(cmdString)
var cmdl *exec.Cmd
if strings.Contains(cmd, "git ") {
cmdl = exec.Command("sh", "-c", cmdString)
} else {
cmdl = exec.Command("sh", "-c", Deis+cmdString)
}
switch failFlag {
case true:
if stdout, stderr, err := RunCommandWithStdoutStderr(cmdl); err != nil {
if strings.Contains(stdout.String(), expect) || strings.Contains(stderr.String(), expect) {
fmt.Println("(Error expected...ok)")
} else {
t.Fatal(err)
}
} else {
if strings.Contains(stdout.String(), expect) || strings.Contains(stderr.String(), expect) {
fmt.Println("(Error expected...ok)" + expect)
} else {
t.Fatal(err)
}
}
case false:
if _, _, err := RunCommandWithStdoutStderr(cmdl); err != nil {
t.Fatal(err)
} else {
fmt.Println("ok")
}
}
}
// AppsDestroyTest destroys a Deis app and checks that it was successful.
func AppsDestroyTest(t *testing.T, params *DeisTestConfig) {
cmd := "apps:destroy --app={{.AppName}} --confirm={{.AppName}}"
if err := Chdir(params.ExampleApp); err != nil {
t.Fatal(err)
}
Execute(t, cmd, params, false, "")
if err := Chdir(".."); err != nil {
t.Fatal(err)
}
if err := Rmdir(params.ExampleApp); err != nil {
t.Fatal(err)
}
}
// GetRandomApp returns a known working example app at random for testing.
func GetRandomApp() string {
rand.Seed(int64(time.Now().Unix()))
apps := []string{
"example-clojure-ring",
// "example-dart",
"example-dockerfile-python",
"example-go",
"example-java-jetty",
"example-nodejs-express",
// "example-php",
"example-play",
"example-python-django",
"example-python-flask",
"example-ruby-sinatra",
"example-scala",
"example-dockerfile-http",
}
return apps[rand.Intn(len(apps))]
}
|
[
"\"DEIS_TEST_AUTH_KEY\"",
"\"DEIS_TEST_HOSTS\"",
"\"DEIS_TEST_DOMAIN\"",
"\"DEIS_TEST_SSH_KEY\"",
"\"DEIS_TEST_APP\"",
"\"DEIS_TEST_APP_DOMAIN\""
] |
[] |
[
"DEIS_TEST_HOSTS",
"DEIS_TEST_DOMAIN",
"DEIS_TEST_AUTH_KEY",
"DEIS_TEST_APP",
"DEIS_TEST_APP_DOMAIN",
"DEIS_TEST_SSH_KEY"
] |
[]
|
["DEIS_TEST_HOSTS", "DEIS_TEST_DOMAIN", "DEIS_TEST_AUTH_KEY", "DEIS_TEST_APP", "DEIS_TEST_APP_DOMAIN", "DEIS_TEST_SSH_KEY"]
|
go
| 6 | 0 | |
docker-workspace/colcon_ws/install/autonomous_exploration/share/autonomous_exploration/turtlebot3_cartographer.launch.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Darby Lim
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, DeclareLaunchArgument, OpaqueFunction
from launch.substitutions import LaunchConfiguration
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
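# The TurtleBot3 model name (for example "burger" or "waffle") must be exported as TURTLEBOT3_MODEL before launching.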
TURTLEBOT3_MODEL = os.environ['TURTLEBOT3_MODEL']
def launch_setup(context, *args, **kwargs):
# Define input variables
use_sim_time = LaunchConfiguration('use_sim_time', default='true')
turtlebot3_cartographer_prefix = get_package_share_directory('autonomous_exploration')
cartographer_config_dir = LaunchConfiguration('cartographer_config_dir', default=os.path.join(
turtlebot3_cartographer_prefix))
configuration_basename = LaunchConfiguration('configuration_basename',
default= TURTLEBOT3_MODEL + '.lua')
cartographer_mode = LaunchConfiguration('cartographer_mode', default='mapping').perform(context)
resolution = LaunchConfiguration('resolution', default='0.05')
publish_period_sec = LaunchConfiguration('publish_period_sec', default='1.0')
# Cartographer_node
cartographer_node = Node(
package='cartographer_ros',
executable='cartographer_node',
name='cartographer_node',
output='screen',
parameters=[{'use_sim_time': use_sim_time}],
arguments=['-configuration_directory', cartographer_config_dir,
'-configuration_basename', configuration_basename])
# Cartographer_grid_node
cartographer_grid_node = Node(
package='cartographer_ros',
executable='occupancy_grid_node',
name='occupancy_grid_node',
output='screen',
parameters=[{'use_sim_time': use_sim_time}],
arguments=['-resolution', resolution, '-publish_period_sec', publish_period_sec])
# Start the cartographer nodes depending on the mode
if cartographer_mode == "mapping":
return [cartographer_node, cartographer_grid_node]
elif cartographer_mode == "localization":
return [cartographer_node]
def generate_launch_description():
return LaunchDescription([
DeclareLaunchArgument('use_sim_time', default_value='true'),
DeclareLaunchArgument('resolution', default_value='0.05',
description='Resolution of a grid cell in the published occupancy grid'),
DeclareLaunchArgument('publish_period_sec', default_value='1.0',
description='OccupancyGrid publishing period'),
DeclareLaunchArgument('cartographer_mode', default_value='mapping'),
OpaqueFunction(function = launch_setup)
])
|
[] |
[] |
[
"TURTLEBOT3_MODEL"
] |
[]
|
["TURTLEBOT3_MODEL"]
|
python
| 1 | 0 | |
keyring/keyring.go
|
package keyring
import (
"bufio"
"code.google.com/p/gosshold/ssh"
"crypto/ecdsa"
"crypto/rsa"
"github.com/howeyc/gopass"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"io"
"path/filepath"
"os"
"os/user"
)
// keyring implements the ClientKeyring interface
type clientKeyring struct {
//should be interface type supporting PKCS#1, RSA, DSA and ECDSA
//keys []*rsa.PrivateKey
signers []ssh.Signer
}
func (k *clientKeyring) Key(i int) (ssh.PublicKey, error) {
//fmt.Println("Getting key number", i)
if i < 0 || i >= len(k.signers) {
//no more keys but no error. Signifies 'try next authenticator'
return nil, nil
}
return k.signers[i].PublicKey(), nil
}
func (k *clientKeyring) Sign(i int, rand io.Reader, data []byte) (sig []byte, err error) {
//fmt.Println("Signing with key number", i)
return k.signers[i].Sign(rand, data)
}
func (k *clientKeyring) LoadRsa(key *rsa.PrivateKey) error {
return k.load(key)
}
func (k *clientKeyring) LoadEcdsa(key *ecdsa.PrivateKey) error {
return k.load(key)
}
func (k *clientKeyring) load(key interface{}) error {
signer, err := ssh.NewSignerFromKey(key)
if err != nil {
return err
}
//fmt.Printf("adding signer from key %+v\n with pub key: %+v\n", key, signer.PublicKey())
k.signers = append(k.signers, signer)
//fmt.Println("total ", len(k.signers), " signers")
return nil
}
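// userDir returns the current user's home directory, falling back to $HOME when user lookup fails (e.g. cross-compiled binaries).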
func userDir() string {
u, err := user.Current()
if err != nil {
//probably cross-compiled. Use env
return os.Getenv("HOME")
}
return u.HomeDir
}
func (k *clientKeyring) LoadIdFiles(files []string) []error {
errs := []error{}
for _, file := range files {
err := k.LoadFromPEMFile(file)
if err != nil {
errs = append(errs, fmt.Errorf("Error loading file '%s': \n\t%v\n", file, err))
}
}
return errs
}
//todo: check openssh (ssh-add) source. should there be a glob on the dir?
func (k *clientKeyring) LoadDefaultIdFiles() []error {
files := []string{
filepath.Join(userDir(), ".ssh", "id_ecdsa"),
filepath.Join(userDir(), ".ssh", "id_rsa"),
// can't get dsa working for now
// filepath.Join(userDir(), ".ssh", "id_dsa")
}
return k.LoadIdFiles(files)
}
func readLines(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
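// LoadFromPEMFile parses a PEM-encoded private key file, prompting for a passphrase if the block is encrypted, and adds the key to the keyring.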
func (k *clientKeyring) LoadFromPEMFile(file string) error {
filebuf, err := ioutil.ReadFile(file)
if err != nil {
return err
}
block, _ := pem.Decode(filebuf)
if block == nil {
return errors.New("ssh: no key found")
}
if x509.IsEncryptedPEMBlock(block) {
fmt.Printf("Password for key '%s':", file)
password := gopass.GetPasswd()
decrypted, err := x509.DecryptPEMBlock(block, password)
if err != nil {
return err
}
//println("decrypted block of type ", block.Type)
block.Bytes = decrypted
}
switch block.Type {
case "RSA PRIVATE KEY":
r, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return err
}
return k.LoadRsa(r)
case "EC PRIVATE KEY":
d, err := x509.ParseECPrivateKey(block.Bytes)
if err != nil {
return err
}
return k.load(d)
/*
case "DSA PRIVATE KEY":
lines, err := readLines(file)
important := lines[1:len(lines)-1]
filebuf := ""
for _, line := range important {
filebuf += line
}
fmt.Printf("key data b64: %s\n", filebuf)
dbytes, err := base64.StdEncoding.DecodeString(filebuf)
if err != nil {
return err
}
buf := bytes.NewBuffer(dbytes)
var priv dsa.PrivateKey
l := len(dbytes)
fmt.Printf("dsa key length: %d\n", l)
//if l != 448 {
// return errors.New("private key type '"+ block.Type + "' should be 404 bytes, but was not")
//}
//b := []byte{0,0,0,0}
b := buf.Next(4)
fmt.Printf("magic: %s\n", hex.EncodeToString(b))
if err != nil {
return err
}
fmt.Printf("All bits: %s\n", hex.EncodeToString(dbytes))
fmt.Printf("All bits: %s\n", hex.EncodeToString(block.Bytes))
priv.P = new(big.Int).SetBytes(block.Bytes[0:128])
priv.Q = new(big.Int).SetBytes(block.Bytes[128:148])
priv.G = new(big.Int).SetBytes(block.Bytes[148:286])
//what about the other 160 bits?
fmt.Printf("Missing bits: %s\n", hex.EncodeToString(block.Bytes[286:306]))
priv.Y = new(big.Int).SetBytes(block.Bytes[306:424])
priv.X = new(big.Int).SetBytes(block.Bytes[424:448])
fmt.Printf("Missing bits: %s\n", hex.EncodeToString(block.Bytes[424:448]))
return k.load(&priv)
*/
default:
return errors.New("Unsupported private key type '"+ block.Type + "'")
}
}
func LoadKeyring(idFile string) (ssh.ClientAuth, error) {
//load and sign
keyring := clientKeyring{}
err := keyring.LoadFromPEMFile(idFile)
if err != nil {
return nil, err
}
return ssh.ClientAuthKeyring(&keyring), nil
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cmd/client/client.go
|
/*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package client
import (
"os"
"github.com/skydive-project/skydive/config"
"github.com/spf13/cobra"
)
var analyzerAddr string
// ClientCmd describe the skydive client root command
var ClientCmd = &cobra.Command{
Use: "client",
Short: "Skydive client",
Long: "Skydive client",
SilenceUsage: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmd.Root().PersistentPreRun(cmd.Root(), args)
if analyzerAddr != "" {
config.Set("analyzers", analyzerAddr)
} else {
config.SetDefault("analyzers", []string{"localhost:8082"})
}
},
}
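// RegisterClientCommands adds every Skydive client subcommand to the given root command.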
func RegisterClientCommands(cmd *cobra.Command) {
cmd.AddCommand(AlertCmd)
cmd.AddCommand(CaptureCmd)
cmd.AddCommand(PacketInjectorCmd)
cmd.AddCommand(PcapCmd)
cmd.AddCommand(QueryCmd)
cmd.AddCommand(ShellCmd)
cmd.AddCommand(StatusCmd)
cmd.AddCommand(TopologyCmd)
cmd.AddCommand(UserMetadataCmd)
cmd.AddCommand(WorkflowCmd)
cmd.AddCommand(NodeRuleCmd)
cmd.AddCommand(EdgeRuleCmd)
}
func init() {
ClientCmd.PersistentFlags().StringVarP(&AuthenticationOpts.Username, "username", "", os.Getenv("SKYDIVE_USERNAME"), "username auth parameter")
ClientCmd.PersistentFlags().StringVarP(&AuthenticationOpts.Password, "password", "", os.Getenv("SKYDIVE_PASSWORD"), "password auth parameter")
ClientCmd.PersistentFlags().StringVarP(&analyzerAddr, "analyzer", "", os.Getenv("SKYDIVE_ANALYZER"), "analyzer address")
RegisterClientCommands(ClientCmd)
}
|
[
"\"SKYDIVE_USERNAME\"",
"\"SKYDIVE_PASSWORD\"",
"\"SKYDIVE_ANALYZER\""
] |
[] |
[
"SKYDIVE_ANALYZER",
"SKYDIVE_PASSWORD",
"SKYDIVE_USERNAME"
] |
[]
|
["SKYDIVE_ANALYZER", "SKYDIVE_PASSWORD", "SKYDIVE_USERNAME"]
|
go
| 3 | 0 | |
utils.py
|
# -*- coding: utf-8 -*-
"""
These functions is designed for crystal structure formation and
the 3D molecular viewer application
"""
import json
import plotly.graph_objects as go
import dash_html_components as html
import dash_bio as bio
def load_json(file_path):
"""
Loading the structure json file
Parameters
----------
file_path: str
Returns
-------
(mol_data, style_data): tuple
The parsed molecule data and viewer style dictionaries.
"""
with open(file_path, 'r', encoding='utf-8') as json_file:
mol_data, style_data = json.load(json_file)
return mol_data, style_data
def th4_plot(df, x_axis_column_name, y_axis_column_name, colour_column_value):
"""
Plotly Figure object for th4 data
Parameters
----------
df: DataFrame
Data table
x_axis_column_name: str
y_axis_column_name: str
colour_column_value: str
Returns
-------
Figure
Plotly figure object
"""
fig = go.Figure()
fig.add_trace(go.Scatter(
# X and Y coordinates from data table
x=df[x_axis_column_name],
y=df[y_axis_column_name],
text=df.index,
mode='markers',
# Set the format of scatter
marker=dict(
symbol='circle',
opacity=0.7,
line=dict(color='rgb(40, 40, 40)', width=0.2),
size=8,
# Colour bar
color=df[colour_column_value],
colorscale='RdBu',
colorbar=dict(
thicknessmode='pixels',
thickness=20,
title=dict(text=colour_column_value, side='right')
),
reversescale=True,
showscale=True
)
))
# Set the format of axes
axis_template = dict(linecolor='#444', tickcolor='#444',
ticks='outside', showline=True, zeroline=False,
gridcolor='lightgray')
fig.update_layout(
xaxis=dict(axis_template, **dict(title=x_axis_column_name)),
yaxis=dict(axis_template, **dict(title=y_axis_column_name)),
clickmode='event+select',
hovermode='closest',
plot_bgcolor='white'
)
return fig
def structure_viewer(df, interactive_data):
"""
The molecular 3D viewer
Parameters
----------
df: Dataframe
Data table
interactive_data: dict
Plotly callback information
Returns
-------
list
A list of viewer object
"""
def single_3d_viewer(json_file, structure_index):
mol_data, style_data = load_json(json_file)
mol_3d = html.Div(
id='viewer',
children=[
html.P('Structure ID: {}'.format(structure_index)),
bio.Molecule3dViewer(
id='mol-3d-viewer',
selectionType='atom',
styles=style_data,
modelData=mol_data
)
]
)
return mol_3d
mol_div = []
# Loading multiple 3D viewer
try:
for i in range(len(interactive_data['points'])):
# Find index from plotly callback information
index = int(interactive_data['points'][i]['pointIndex'])
origin_idx = index
# Get structure name
structure_name = int(df.iloc[index].Name)
# Path of parsed structure file
json_path = './data/th4/{}.json'.format(structure_name)
mol_div.append(single_3d_viewer(json_path, origin_idx))
# Default structure
except TypeError:
json_path = './data/th4/100020031487063.json'
mol_div.append(single_3d_viewer(json_path, 'TH4 global minimum'))
return mol_div
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
main.py
|
import discord
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
import os
bot = discord.Client()
bot = commands.Bot(command_prefix='&',help_command=None)
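# Load every Python module found in the cogs/ directory as a bot extension at startup.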
for filename in os.listdir('cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
@bot.event
async def on_ready():
game = discord.Game('&help')
await bot.change_presence(status=discord.Status.online, activity=game)
print('We have logged in as {0.user}'.format(bot))
print("I'm in " + str(len(bot.guilds)) + " servers!")
bot.run(os.getenv("TOKEN"))
|
[] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
python
| 1 | 0 | |
angel-ps/python/pyangel/find_angel_home.py
|
#
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
#
from __future__ import print_function
import os
import sys
def _find_angel_home():
"""Find the ANGEL_HOME."""
# If the environment has ANGEL_HOME set, trust it.
if "ANGEL_HOME" in os.environ:
return os.environ["ANGEL_HOME"]
def is_angel_home(path):
"""Takes a path and returns true if the provided path could be a reasonable ANGEL_HOME"""
return (os.path.isfile(os.path.join(path, "bin/angel-submit")) and
(os.path.isdir(os.path.join(path, "python")) or
os.path.isdir(os.path.join(path, "lib"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PyAngel module if it exists
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyangel")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyangel").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_angel_home(path))
except StopIteration:
print("Could not find valid ANGEL_HOME while searching {0}".format(paths), file=sys.stderr)
exit(-1)
if __name__ == "__main__":
print(_find_angel_home())
|
[] |
[] |
[
"ANGEL_HOME"
] |
[]
|
["ANGEL_HOME"]
|
python
| 1 | 0 | |
youtube/wsgi.py
|
"""
WSGI config for youtube project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'youtube.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/load-data/common/load-us-counties.py
|
#!/usr/bin/env python3
# #############################################################################
# Helper script to generate us-counties.csv file
# * Download counties information from census.gov
# * Download geo-location from datasciencetoolkit.org
# Save result into the '../../common/us/counties.csv' file
#
# requirements: pandas, xlrd
# #############################################################################
import codecs
import csv
import json
import os
import pandas
import re
import sys
from urllib import request, parse as urlparse
from pathlib import Path
codecs.register_error('strict', codecs.ignore_errors)
URL = 'https://www2.census.gov/programs-surveys/popest/geographies/2017/all-geocodes-v2017.xlsx'
# URL = f'file://{(Path(__file__).parent / "all-geocodes-v2017.xlsx").resolve()}'
CORRECT_US_STATES_COUNTIES_COUNT = 3143 # value on end 2017
CORRECT_US_TERRITORIES_COUNTIES_COUNT = 78 # value on 2020
output_file = Path(__file__).parent / '..' / '..' / '..' / 'common' / 'us' / 'counties.csv'
counties = []
# -----------------------------------------------------------------------------
# Show debug output only with the env.DEBUG = true
debug = (lambda msg: print(msg)) if os.environ.get('DEBUG') == 'true' else (lambda msg: None)
# -----------------------------------------------------------------------------
# Load all states {state_fips: (state_id, state_name)}
states_file = Path(__file__).parent / '..' / '..' / '..' / 'common' / 'us' / 'states.csv'
print(f'Read csv with states: {states_file}')
states = dict([(r[1].rjust(2, '0'), (r[0], r[2])) for r in csv.reader(states_file.open('r'), delimiter=',', quotechar='"')])
# -----------------------------------------------------------------------------
# Load geo location for a county
def get_geo_location(county_name, state_id):
urlquery = urlparse.quote(f'address={county_name},{state_id},US')
url = f'http://www.datasciencetoolkit.org/maps/api/geocode/json?{urlquery}'
try:
debug(f'Load geolocation from: {url}')
response = codecs.decode(request.urlopen(url).read(), 'utf-8')
debug(f'Parse data')
data = json.loads(response)
location = data['results'][0]['geometry']['location']
debug(f'Found location: {location}')
return (location['lat'], location['lng'])
except Exception as ex:
print(f'Error get/parse data from {url}')
print(ex)
raise ex
# -----------------------------------------------------------------------------
# Safety add county information to the main list
def add_county_to_the_list(fips, state_id, name, latitude=None, longitude=None):
for item in counties:
if item[0] == fips:
debug(f'{fips}, {state_id}, {name} - Skip. Already exists in the list.')
return
if latitude is None or longitude is None:
(latitude, longitude) = get_geo_location(name, state_id)
print(f'{fips}, {state_id}, {name}, {latitude}, {longitude}')
counties.append([fips, state_id, name, latitude, longitude])
# -----------------------------------------------------------------------------
# Main method
print(f'Load data from {URL}')
data = pandas.read_excel(URL, sheet_name=0)
print(f'Parsing data line by line')
name_replace_patterns = [
re.compile(' city', re.IGNORECASE),
re.compile(' town', re.IGNORECASE),
re.compile(' county', re.IGNORECASE)
]
for row in data.itertuples():
level = str(row._1)
state_fips = str(row._2)
county_fips = str(row._3)
subdiv_fips = str(row._4)
place_fips = str(row._5)
city_fips = str(row._6)
name = str(row._7)
if not level.isdigit() or not state_fips.isdigit() or not county_fips.isdigit():
debug(f'Skip header: {row}')
continue # Skip headers
if county_fips == '000' or not (subdiv_fips == '00000' and place_fips == '00000' and city_fips == '00000'):
debug(f'Skip non county: {row}')
continue # skip states and country and non county
fips = f'{state_fips}{county_fips}'
country_id = 'US'
state_id = states[state_fips][0]
if fips in ['02093', '02261']:
# Skip: Valdez–Cordova Census Area (Alaska) - on Jan 2019
debug(f'{fips}, {state_id}, {name} - Skip.')
else:
add_county_to_the_list(fips, state_id, name)
# Correction on 2020
# Jan 2019 update for Valdez–Cordova Census Area (Alaska)
add_county_to_the_list('02063', 'AK', 'Chugach Census Area', 61.130833, -146.348333)
add_county_to_the_list('02066', 'AK', 'Copper River Census Area', 62.109722, -145.557222)
# -----------------------------------------------------------------------------
# Write counties to the csv file
found_total_counties = len(counties)
with output_file.open('w') as file_writer:
csv_writer = csv.writer(file_writer, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['fips', 'state_id', 'name', 'latitude', 'longitude'])
for county in sorted(counties, key=lambda x: x[0]):
csv_writer.writerow(county)
# -----------------------------------------------------------------------------
# Show validation result
correct_counties_count = CORRECT_US_STATES_COUNTIES_COUNT + CORRECT_US_TERRITORIES_COUNTIES_COUNT
if found_total_counties == correct_counties_count:
print(f'[OK ] Found {found_total_counties} counties - correct')
else:
print(f'[ERR] Found {found_total_counties} counties - incorrect (should be {correct_counties_count} <- '
f'{CORRECT_US_STATES_COUNTIES_COUNT} + {CORRECT_US_TERRITORIES_COUNTIES_COUNT})')
sys.exit(1)
|
[] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
python
| 1 | 0 | |
matterbridge.go
|
package main
import (
"flag"
"fmt"
"os"
"strings"
"github.com/42wim/matterbridge/bridge/config"
"github.com/42wim/matterbridge/gateway"
"github.com/42wim/matterbridge/gateway/bridgemap"
"github.com/google/gops/agent"
prefixed "github.com/matterbridge/logrus-prefixed-formatter"
"github.com/sirupsen/logrus"
)
var (
version = "1.15.2-dev"
githash string
flagConfig = flag.String("conf", "matterbridge.toml", "config file")
flagDebug = flag.Bool("debug", false, "enable debug")
flagVersion = flag.Bool("version", false, "show version")
flagGops = flag.Bool("gops", false, "enable gops agent")
)
func main() {
flag.Parse()
if *flagVersion {
fmt.Printf("version: %s %s\n", version, githash)
return
}
rootLogger := setupLogger()
logger := rootLogger.WithFields(logrus.Fields{"prefix": "main"})
if *flagGops {
if err := agent.Listen(agent.Options{}); err != nil {
logger.Errorf("Failed to start gops agent: %#v", err)
} else {
defer agent.Close()
}
}
logger.Printf("Running version %s %s", version, githash)
if strings.Contains(version, "-dev") {
logger.Println("WARNING: THIS IS A DEVELOPMENT VERSION. Things may break.")
}
cfg := config.NewConfig(rootLogger, *flagConfig)
cfg.BridgeValues().General.Debug = *flagDebug
r, err := gateway.NewRouter(rootLogger, cfg, bridgemap.FullMap)
if err != nil {
logger.Fatalf("Starting gateway failed: %s", err)
}
if err = r.Start(); err != nil {
logger.Fatalf("Starting gateway failed: %s", err)
}
logger.Printf("Gateway(s) started succesfully. Now relaying messages")
select {}
}
func setupLogger() *logrus.Logger {
logger := &logrus.Logger{
Out: os.Stdout,
Formatter: &prefixed.TextFormatter{
PrefixPadding: 13,
DisableColors: true,
FullTimestamp: true,
},
Level: logrus.InfoLevel,
}
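// Debug logging is enabled either via the -debug flag or by setting the DEBUG=1 environment variable.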
if *flagDebug || os.Getenv("DEBUG") == "1" {
logger.Formatter = &prefixed.TextFormatter{
PrefixPadding: 13,
DisableColors: true,
FullTimestamp: false,
ForceFormatting: true,
}
logger.Level = logrus.DebugLevel
logger.WithFields(logrus.Fields{"prefix": "main"}).Info("Enabling debug logging.")
}
return logger
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
synwatch.py
|
#!/usr/bin/env python
"""
A command-line program that produces a live plot of the progress
of a SYNAPPS task. Requires the path to the yaml of the running
SYNAPPS instance and path to a running log of the SYNAPPS process
as produced, for example by the following commands:
::: synapps SN2011dh.yaml > synapps.log
::: synapps sn2011dh.yaml | tee synapps.log
Usage: synwatch <synapps.yaml.file> <synapps.running.log> <refresh interval (minutes)>
"""
try:
from pyES import Common, Synpp, Synapps
except ImportError:
from astro.pyES import Common, Synpp, Synapps
from subprocess import Popen, PIPE
import os
import sys
import yaml
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
ion_dict = {100:'H I', 200:'He I', 201:'He II', 300:'Li I', 301:'Li II', 400:'Be I',
401:'Be II', 402:'Be III', 500:'B I', 501:'B II', 502:'B III', 503:'B IV',
600:'C I', 601:'C II', 602:'C III', 603:'C IV', 700:'N I', 701:'N II',
702:'N III', 703:'N IV', 704:'N V', 800:'O I', 801:'O II', 802:'O III',
803:'O IV', 804:'O V', 900:'F I', 901:'F II', 1000:'Ne I', 1100:'Na I',
1200:'Mg I', 1201:'Mg II', 1300:'Al I', 1301:'Al II', 1302:'Al III', 1400:'Si I',
1401:'Si II', 1402:'Si III', 1403:'Si IV', 1500:'P I', 1501:'P II', 1502:'P III',
1600:'S I', 1601:'S II', 1602:'S III', 1700:'Cl I', 1800:'Ar I', 1801:'Ar II',
1900:'K I', 1901:'K II', 2000:'Ca I', 2001:'Ca II', 2100:'Sc I', 2101:'Sc II',
2200:'Ti I', 2201:'Ti II', 2202:'Ti III', 2300:'V I', 2301:'V II', 2302:'V III',
2400:'Cr I', 2401:'Cr II', 2402:'Cr III', 2500:'Mn I', 2501:'Mn II', 2502:'Mn III',
2600:'Fe I', 2601:'Fe II', 2602:'Fe III', 2603:'Fe IV', 2700:'Co I', 2701:'Co II',
2702:'Co III', 2703:'Co IV', 2800:'Ni I', 2801:'Ni II', 2802:'Ni III', 2803:'Ni IV',
2901:'Cu II', 3002:'Zn III', 3801:'Sr II', 5600:'Ba I', 5601:'Ba II'}
def set_OMP():
'''
Manage the setting of OMP_NUM_THREADS to most efficiently
utilize parallelization.
'''
nproc, e = Popen("nproc --all",shell=True,stdout=PIPE).communicate()
os.environ['OMP_NUM_THREADS'] = nproc.strip()  # strip the trailing newline from the nproc output
def parse_log( yaml_file, log_file, nlines=3, stepsize=1 ):
'''
Parse a yaml and a log, returning a SYN++ input yaml as a string as
well as several other useful things.
nlines: the number of recent models to report back
stepsize: if stepsize > 1, will skip <stepsize> models in between reported models
'''
# Parse the log to find most recent minimum
last_min = None
last_mins = []
for line in open( log_file, "r" ).readlines() :
try:
if line.startswith( "New Min" ) or line.startswith( "Final Min" ) :
last_min = line.rstrip()
last_mins.append(last_min)
if len(last_mins) > nlines*stepsize:
last_mins.pop(0)
except:
continue
if not last_min :
raise Exception("ERROR: No 'New Min' or 'Final Min' lines found in log: %s" % log_file)
last_mins.reverse()
last_mins = last_mins[::stepsize]
for i,lm in enumerate(last_mins):
last_mins[i] = [ float( x ) for x in lm[ lm.find( "[" ) + 1 : lm.find( "]" ) ].split() ]
setups = []
for last_min in last_mins:
# Create a synapps object
synapps = Synapps.Synapps.create( yaml.load( open( yaml_file, "r" ) ) )
# Is the log compatible with the YAML control file?
num_ions = 0
num_active_ions = 0
for ion in synapps.config.ions :
num_ions += 1
num_active_ions += 1 if ion.active else 0
num_params = num_active_ions * 5
num_params += 6
if num_params != len( last_min ) :
raise Exception("ERROR: Incompatible synapps.yaml and synapps.log: %d and %d parameters" % ( num_params, len( last_min ) ))
# Substitute log entries into synapps control object's start slots.
synapps.config.a0.start = last_min.pop( 0 )
synapps.config.a1.start = last_min.pop( 0 )
synapps.config.a2.start = last_min.pop( 0 )
synapps.config.v_phot.start = last_min.pop( 0 )
synapps.config.v_outer.start = last_min.pop( 0 )
synapps.config.t_phot.start = last_min.pop( 0 )
j = 0
for i in range( num_ions ) :
if not synapps.config.ions[ i ].active :
continue
synapps.config.ions[ i ].log_tau.start = last_min[ j + 0 * num_active_ions ]
synapps.config.ions[ i ].v_min.start = last_min[ j + 1 * num_active_ions ]
synapps.config.ions[ i ].v_max.start = last_min[ j + 2 * num_active_ions ]
synapps.config.ions[ i ].aux.start = last_min[ j + 3 * num_active_ions ]
synapps.config.ions[ i ].temp.start = last_min[ j + 4 * num_active_ions ]
j += 1
# Create a syn++ yaml control file.
output = Synpp.Output.create()
grid = synapps.grid
opacity = synapps.opacity
source = synapps.source
spectrum = synapps.spectrum
setup = Synpp.Setup.create()
setup.a0 = synapps.config.a0.start
setup.a1 = synapps.config.a1.start
setup.a2 = synapps.config.a2.start
setup.v_phot = synapps.config.v_phot.start
setup.v_outer = synapps.config.v_outer.start
setup.t_phot = synapps.config.t_phot.start
for i, ion in enumerate( synapps.config.ions ) :
if synapps.config.ions[ i ].active:
setup.ions.append( Synpp.Ion( ion.ion, ion.active, ion.log_tau.start, ion.v_min.start, ion.v_max.start, ion.aux.start, ion.temp.start ) )
setups.append(setup)
return str(Synpp.Synpp( output, grid, opacity, source, spectrum, [setups[0]] )), setups, synapps.evaluator.target_file
def take_snapshot( yaml_file, log_file, verbose=False, tmpfile='synwatch.syn++.yaml', cmd='syn++' ):
'''
Take a snapshot from the logfile and return the spectra and the synapps object.
'''
if verbose: print 'scanning logfile...'
yaml_string, setups, target_file = parse_log(yaml_file,log_file, stepsize=100)
open(tmpfile,'w').write( yaml_string )
if verbose: print 'running syn++'
res = Popen( cmd+' '+tmpfile, shell=True, stderr=PIPE, stdout=PIPE )
o,e = res.communicate()
Popen( 'rm '+tmpfile, shell=True )
synthetic_spec = np.array([ map(float,[l.split(' ')[0],l.split(' ')[1]]) for l in o.strip().split('\n') ])
true_spec = np.loadtxt( target_file )
return synthetic_spec, true_spec, setups
def update_plot( syn_spec, true_spec, setups, name='SYNAPPS' ):
'''
Take the results of a snapshot and update the current plot.
Plot sketch: large panel with both spectra (include error)
large panel below with rows of bar plots:
log_tau/aux (double-axis bar)
v_min/v_max (double-ended bar with ends at v_phot, v_outer)
tmp (bar with hline for t_phot)
right panel spanning all, with:
summary info (file names, elapsed time, number of fits)
a0,a1,a2 bar plot
'''
plt.clf()
# first the spectral plot
ax = plt.subplot2grid( (6,4), (0,0), rowspan=2, colspan=4 )
ax.fill_between( true_spec[:,0], true_spec[:,1]+true_spec[:,2],
true_spec[:,1]-true_spec[:,2], interpolate=True, color='grey', alpha=0.1 )
ax.plot( true_spec[:,0], true_spec[:,1], color='k' )
ax.plot( syn_spec[:,0], syn_spec[:,1], color='r' )
a_str = 'a0: {}\na1: {}\na2: {}'.format( setups[0].a0, setups[0].a1, setups[0].a2 )
ax.annotate( a_str, (.8, .8), xycoords='figure fraction' )
ax.set_title( name )
ax.set_yticks([])
width = 0.3
offsets = [0.0, 0.3, 0.6]
alphas = [1.0, 0.5, 0.25]
axes = [plt.subplot2grid( (6,4), (2,0), colspan=4 ), plt.subplot2grid( (6,4), (3,0), colspan=4 ),
plt.subplot2grid( (6,4), (4,0), colspan=4 ), plt.subplot2grid( (6,4), (5,0), colspan=4 )]
for ijk, setup in enumerate(setups):
# the log_tau plot
ax = axes[0]
x = np.arange(len(setup.ions))
log_tau = [i.log_tau for i in setup.ions]
ax.bar( x+offsets[ijk], log_tau, color='b', alpha=alphas[ijk], width=width )
ax.hlines( 0.0, x[0], x[-1]+1, linestyles='dashed' )
ax.set_ylabel(r'Log $\tau$')
ax.set_xticks([])
# now the velocities plot
ax = axes[1]
v_maxs = [i.v_max for i in setup.ions]
v_mins = [i.v_min for i in setup.ions]
ax.bar( x+offsets[ijk], v_maxs, color='red', width=width, alpha=alphas[ijk] )
ax.bar( x+offsets[ijk], v_mins, color='white', edgecolor='white', width=width )
ax.set_ylim( setup.v_phot, setup.v_outer )
ax.set_ylabel(r'V (km/s)')
ax.set_xticks([])
# now the exponents plot
ax = axes[2]
exponents = [i.aux for i in setup.ions]
ax.bar( x+offsets[ijk], exponents, width=width, color='blue', alpha=alphas[ijk] )
ax.set_ylabel(r'Exponent')
ax.set_xticks([])
# now the temperatures
ax = axes[3]
temps = [i.temp for i in setup.ions]
ax.hlines( setup.t_phot, x[0], x[-1]+1, linestyles='dashed' )
ax.bar( x+offsets[ijk], temps, color='red', width=width, alpha=alphas[ijk] )
ax.set_ylabel(r'T (kK)')
labels = [ion_dict[i.ion] for i in setup.ions]
ax.set_xticks( x+0.4 )
ax.set_xticklabels( labels )
ax.annotate('photosphere', (x[-1], setup.t_phot+0.5))
plt.draw()
plt.show()
if __name__ == '__main__':
try:
assert( os.path.isfile(sys.argv[-3]) )
assert( os.path.isfile(sys.argv[-2]) )
refresh_interval = float( sys.argv[-1] ) * 60
except:
raise Exception('Usage: synwatch <synapps.yaml.file> <synapps.running.log> <refresh interval (minutes)>')
# plt.ion()
set_OMP()
fig = plt.figure( figsize=(12,8) )
nframe = 0
while True:
update_plot( *take_snapshot( sys.argv[-3], sys.argv[-2], True ), name=sys.argv[-3]+' - '+str(nframe) )
nframe +=1
print '{}: sleeping for {} minutes'.format(nframe, sys.argv[-1])
sleep( refresh_interval )
|
[] |
[] |
[
"OMP_NUM_THREADS"
] |
[]
|
["OMP_NUM_THREADS"]
|
python
| 1 | 0 | |
src/os/exec/exec.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package exec runs external commands. It wraps os.StartProcess to make it
// easier to remap stdin and stdout, connect I/O with pipes, and do other
// adjustments.
//
// Unlike the "system" library call from C and other languages, the
// os/exec package intentionally does not invoke the system shell and
// does not expand any glob patterns or handle other expansions,
// pipelines, or redirections typically done by shells. The package
// behaves more like C's "exec" family of functions. To expand glob
// patterns, either call the shell directly, taking care to escape any
// dangerous input, or use the path/filepath package's Glob function.
// To expand environment variables, use package os's ExpandEnv.
//
// Note that the examples in this package assume a Unix system.
// They may not run on Windows, and they do not run in the Go Playground
// used by golang.org and godoc.org.
package exec
import (
"bytes"
"context"
"errors"
"internal/syscall/execenv"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
)
// Error is returned by LookPath when it fails to classify a file as an
// executable.
type Error struct {
// Name is the file name for which the error occurred.
Name string
// Err is the underlying error.
Err error
}
func (e *Error) Error() string {
return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error()
}
func (e *Error) Unwrap() error { return e.Err }
// Cmd represents an external command being prepared or run.
//
// A Cmd cannot be reused after calling its Run, Output or CombinedOutput
// methods.
type Cmd struct {
// Path is the path of the command to run.
//
// This is the only field that must be set to a non-zero
// value. If Path is relative, it is evaluated relative
// to Dir.
Path string
// Args holds command line arguments, including the command as Args[0].
// If the Args field is empty or nil, Run uses {Path}.
//
// In typical use, both Path and Args are set by calling Command.
Args []string
// Env specifies the environment of the process.
// Each entry is of the form "key=value".
// If Env is nil, the new process uses the current process's
// environment.
// If Env contains duplicate environment keys, only the last
// value in the slice for each duplicate key is used.
// As a special case on Windows, SYSTEMROOT is always added if
// missing and not explicitly set to the empty string.
Env []string
// Dir specifies the working directory of the command.
// If Dir is the empty string, Run runs the command in the
// calling process's current directory.
Dir string
// Stdin specifies the process's standard input.
//
// If Stdin is nil, the process reads from the null device (os.DevNull).
//
// If Stdin is an *os.File, the process's standard input is connected
// directly to that file.
//
// Otherwise, during the execution of the command a separate
// goroutine reads from Stdin and delivers that data to the command
// over a pipe. In this case, Wait does not complete until the goroutine
// stops copying, either because it has reached the end of Stdin
// (EOF or a read error) or because writing to the pipe returned an error.
Stdin io.Reader
// Stdout and Stderr specify the process's standard output and error.
//
// If either is nil, Run connects the corresponding file descriptor
// to the null device (os.DevNull).
//
// If either is an *os.File, the corresponding output from the process
// is connected directly to that file.
//
// Otherwise, during the execution of the command a separate goroutine
// reads from the process over a pipe and delivers that data to the
// corresponding Writer. In this case, Wait does not complete until the
// goroutine reaches EOF or encounters an error.
//
// If Stdout and Stderr are the same writer, and have a type that can
// be compared with ==, at most one goroutine at a time will call Write.
Stdout io.Writer
Stderr io.Writer
// ExtraFiles specifies additional open files to be inherited by the
// new process. It does not include standard input, standard output, or
// standard error. If non-nil, entry i becomes file descriptor 3+i.
//
// ExtraFiles is not supported on Windows.
ExtraFiles []*os.File
// SysProcAttr holds optional, operating system-specific attributes.
// Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
SysProcAttr *syscall.SysProcAttr
// Process is the underlying process, once started.
Process *os.Process
// ProcessState contains information about an exited process,
// available after a call to Wait or Run.
ProcessState *os.ProcessState
ctx context.Context // nil means none
lookPathErr error // LookPath error, if any.
finished bool // when Wait was called
childFiles []*os.File
closeAfterStart []io.Closer
closeAfterWait []io.Closer
goroutine []func() error
errch chan error // one send per goroutine
waitDone chan struct{}
}
// Command returns the Cmd struct to execute the named program with
// the given arguments.
//
// It sets only the Path and Args in the returned structure.
//
// If name contains no path separators, Command uses LookPath to
// resolve name to a complete path if possible. Otherwise it uses name
// directly as Path.
//
// The returned Cmd's Args field is constructed from the command name
// followed by the elements of arg, so arg should not include the
// command name itself. For example, Command("echo", "hello").
// Args[0] is always name, not the possibly resolved Path.
//
// On Windows, processes receive the whole command line as a single string
// and do their own parsing. Command combines and quotes Args into a command
// line string with an algorithm compatible with applications using
// CommandLineToArgvW (which is the most common way). Notable exceptions are
// msiexec.exe and cmd.exe (and thus, all batch files), which have a different
// unquoting algorithm. In these or other similar cases, you can do the
// quoting yourself and provide the full command line in SysProcAttr.CmdLine,
// leaving Args empty.
func Command(name string, arg ...string) *Cmd {
cmd := &Cmd{
Path: name,
Args: append([]string{name}, arg...),
}
if filepath.Base(name) == name {
if lp, err := LookPath(name); err != nil {
cmd.lookPathErr = err
} else {
cmd.Path = lp
}
}
return cmd
}
// CommandContext is like Command but includes a context.
//
// The provided context is used to kill the process (by calling
// os.Process.Kill) if the context becomes done before the command
// completes on its own.
func CommandContext(ctx context.Context, name string, arg ...string) *Cmd {
if ctx == nil {
panic("nil Context")
}
cmd := Command(name, arg...)
cmd.ctx = ctx
return cmd
}
// String returns a human-readable description of c.
// It is intended only for debugging.
// In particular, it is not suitable for use as input to a shell.
// The output of String may vary across Go releases.
func (c *Cmd) String() string {
if c.lookPathErr != nil {
// failed to resolve path; report the original requested path (plus args)
return strings.Join(c.Args, " ")
}
// report the exact executable path (plus args)
b := new(strings.Builder)
b.WriteString(c.Path)
for _, a := range c.Args[1:] {
b.WriteByte(' ')
b.WriteString(a)
}
return b.String()
}
// interfaceEqual protects against panics from doing equality tests on
// two interfaces with non-comparable underlying types.
func interfaceEqual(a, b any) bool {
defer func() {
recover()
}()
return a == b
}
func (c *Cmd) argv() []string {
if len(c.Args) > 0 {
return c.Args
}
return []string{c.Path}
}
func (c *Cmd) stdin() (f *os.File, err error) {
if c.Stdin == nil {
f, err = os.Open(os.DevNull)
if err != nil {
return
}
c.closeAfterStart = append(c.closeAfterStart, f)
return
}
if f, ok := c.Stdin.(*os.File); ok {
return f, nil
}
pr, pw, err := os.Pipe()
if err != nil {
return
}
c.closeAfterStart = append(c.closeAfterStart, pr)
c.closeAfterWait = append(c.closeAfterWait, pw)
c.goroutine = append(c.goroutine, func() error {
_, err := io.Copy(pw, c.Stdin)
if skipStdinCopyError(err) {
err = nil
}
if err1 := pw.Close(); err == nil {
err = err1
}
return err
})
return pr, nil
}
func (c *Cmd) stdout() (f *os.File, err error) {
return c.writerDescriptor(c.Stdout)
}
func (c *Cmd) stderr() (f *os.File, err error) {
if c.Stderr != nil && interfaceEqual(c.Stderr, c.Stdout) {
return c.childFiles[1], nil
}
return c.writerDescriptor(c.Stderr)
}
func (c *Cmd) writerDescriptor(w io.Writer) (f *os.File, err error) {
if w == nil {
f, err = os.OpenFile(os.DevNull, os.O_WRONLY, 0)
if err != nil {
return
}
c.closeAfterStart = append(c.closeAfterStart, f)
return
}
if f, ok := w.(*os.File); ok {
return f, nil
}
pr, pw, err := os.Pipe()
if err != nil {
return
}
c.closeAfterStart = append(c.closeAfterStart, pw)
c.closeAfterWait = append(c.closeAfterWait, pr)
c.goroutine = append(c.goroutine, func() error {
_, err := io.Copy(w, pr)
pr.Close() // in case io.Copy stopped due to write error
return err
})
return pw, nil
}
func (c *Cmd) closeDescriptors(closers []io.Closer) {
for _, fd := range closers {
fd.Close()
}
}
// Run starts the specified command and waits for it to complete.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the command starts but does not complete successfully, the error is of
// type *ExitError. Other error types may be returned for other situations.
//
// If the calling goroutine has locked the operating system thread
// with runtime.LockOSThread and modified any inheritable OS-level
// thread state (for example, Linux or Plan 9 name spaces), the new
// process will inherit the caller's thread state.
func (c *Cmd) Run() error {
if err := c.Start(); err != nil {
return err
}
return c.Wait()
}
// lookExtensions finds windows executable by its dir and path.
// It uses LookPath to try appropriate extensions.
// lookExtensions does not search PATH, instead it converts `prog` into `.\prog`.
func lookExtensions(path, dir string) (string, error) {
if filepath.Base(path) == path {
path = filepath.Join(".", path)
}
if dir == "" {
return LookPath(path)
}
if filepath.VolumeName(path) != "" {
return LookPath(path)
}
if len(path) > 1 && os.IsPathSeparator(path[0]) {
return LookPath(path)
}
dirandpath := filepath.Join(dir, path)
// We assume that LookPath will only add file extension.
lp, err := LookPath(dirandpath)
if err != nil {
return "", err
}
ext := strings.TrimPrefix(lp, dirandpath)
return path + ext, nil
}
// Start starts the specified command but does not wait for it to complete.
//
// If Start returns successfully, the c.Process field will be set.
//
// The Wait method will return the exit code and release associated resources
// once the command exits.
func (c *Cmd) Start() error {
if c.lookPathErr != nil {
c.closeDescriptors(c.closeAfterStart)
c.closeDescriptors(c.closeAfterWait)
return c.lookPathErr
}
if runtime.GOOS == "windows" {
lp, err := lookExtensions(c.Path, c.Dir)
if err != nil {
c.closeDescriptors(c.closeAfterStart)
c.closeDescriptors(c.closeAfterWait)
return err
}
c.Path = lp
}
if c.Process != nil {
return errors.New("exec: already started")
}
if c.ctx != nil {
select {
case <-c.ctx.Done():
c.closeDescriptors(c.closeAfterStart)
c.closeDescriptors(c.closeAfterWait)
return c.ctx.Err()
default:
}
}
c.childFiles = make([]*os.File, 0, 3+len(c.ExtraFiles))
type F func(*Cmd) (*os.File, error)
for _, setupFd := range []F{(*Cmd).stdin, (*Cmd).stdout, (*Cmd).stderr} {
fd, err := setupFd(c)
if err != nil {
c.closeDescriptors(c.closeAfterStart)
c.closeDescriptors(c.closeAfterWait)
return err
}
c.childFiles = append(c.childFiles, fd)
}
c.childFiles = append(c.childFiles, c.ExtraFiles...)
env, err := c.environ()
if err != nil {
return err
}
c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
Dir: c.Dir,
Files: c.childFiles,
Env: env,
Sys: c.SysProcAttr,
})
if err != nil {
c.closeDescriptors(c.closeAfterStart)
c.closeDescriptors(c.closeAfterWait)
return err
}
c.closeDescriptors(c.closeAfterStart)
// Don't allocate the channel unless there are goroutines to fire.
if len(c.goroutine) > 0 {
c.errch = make(chan error, len(c.goroutine))
for _, fn := range c.goroutine {
go func(fn func() error) {
c.errch <- fn()
}(fn)
}
}
if c.ctx != nil {
c.waitDone = make(chan struct{})
go func() {
select {
case <-c.ctx.Done():
c.Process.Kill()
case <-c.waitDone:
}
}()
}
return nil
}
// An ExitError reports an unsuccessful exit by a command.
type ExitError struct {
*os.ProcessState
// Stderr holds a subset of the standard error output from the
// Cmd.Output method if standard error was not otherwise being
// collected.
//
// If the error output is long, Stderr may contain only a prefix
// and suffix of the output, with the middle replaced with
// text about the number of omitted bytes.
//
// Stderr is provided for debugging, for inclusion in error messages.
// Users with other needs should redirect Cmd.Stderr as needed.
Stderr []byte
}
func (e *ExitError) Error() string {
return e.ProcessState.String()
}
// Wait waits for the command to exit and waits for any copying to
// stdin or copying from stdout or stderr to complete.
//
// The command must have been started by Start.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the command fails to run or doesn't complete successfully, the
// error is of type *ExitError. Other error types may be
// returned for I/O problems.
//
// If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also waits
// for the respective I/O loop copying to or from the process to complete.
//
// Wait releases any resources associated with the Cmd.
func (c *Cmd) Wait() error {
if c.Process == nil {
return errors.New("exec: not started")
}
if c.finished {
return errors.New("exec: Wait was already called")
}
c.finished = true
state, err := c.Process.Wait()
if c.waitDone != nil {
close(c.waitDone)
}
c.ProcessState = state
var copyError error
for range c.goroutine {
if err := <-c.errch; err != nil && copyError == nil {
copyError = err
}
}
c.closeDescriptors(c.closeAfterWait)
if err != nil {
return err
} else if !state.Success() {
return &ExitError{ProcessState: state}
}
return copyError
}
// Output runs the command and returns its standard output.
// Any returned error will usually be of type *ExitError.
// If c.Stderr was nil, Output populates ExitError.Stderr.
func (c *Cmd) Output() ([]byte, error) {
if c.Stdout != nil {
return nil, errors.New("exec: Stdout already set")
}
var stdout bytes.Buffer
c.Stdout = &stdout
captureErr := c.Stderr == nil
if captureErr {
c.Stderr = &prefixSuffixSaver{N: 32 << 10}
}
err := c.Run()
if err != nil && captureErr {
if ee, ok := err.(*ExitError); ok {
ee.Stderr = c.Stderr.(*prefixSuffixSaver).Bytes()
}
}
return stdout.Bytes(), err
}
// CombinedOutput runs the command and returns its combined standard
// output and standard error.
func (c *Cmd) CombinedOutput() ([]byte, error) {
if c.Stdout != nil {
return nil, errors.New("exec: Stdout already set")
}
if c.Stderr != nil {
return nil, errors.New("exec: Stderr already set")
}
var b bytes.Buffer
c.Stdout = &b
c.Stderr = &b
err := c.Run()
return b.Bytes(), err
}
// StdinPipe returns a pipe that will be connected to the command's
// standard input when the command starts.
// The pipe will be closed automatically after Wait sees the command exit.
// A caller need only call Close to force the pipe to close sooner.
// For example, if the command being run will not exit until standard input
// is closed, the caller must close the pipe.
func (c *Cmd) StdinPipe() (io.WriteCloser, error) {
if c.Stdin != nil {
return nil, errors.New("exec: Stdin already set")
}
if c.Process != nil {
return nil, errors.New("exec: StdinPipe after process started")
}
pr, pw, err := os.Pipe()
if err != nil {
return nil, err
}
c.Stdin = pr
c.closeAfterStart = append(c.closeAfterStart, pr)
wc := &closeOnce{File: pw}
c.closeAfterWait = append(c.closeAfterWait, wc)
return wc, nil
}
type closeOnce struct {
*os.File
once sync.Once
err error
}
func (c *closeOnce) Close() error {
c.once.Do(c.close)
return c.err
}
func (c *closeOnce) close() {
c.err = c.File.Close()
}
// StdoutPipe returns a pipe that will be connected to the command's
// standard output when the command starts.
//
// Wait will close the pipe after seeing the command exit, so most callers
// need not close the pipe themselves. It is thus incorrect to call Wait
// before all reads from the pipe have completed.
// For the same reason, it is incorrect to call Run when using StdoutPipe.
// See the example for idiomatic usage.
func (c *Cmd) StdoutPipe() (io.ReadCloser, error) {
if c.Stdout != nil {
return nil, errors.New("exec: Stdout already set")
}
if c.Process != nil {
return nil, errors.New("exec: StdoutPipe after process started")
}
pr, pw, err := os.Pipe()
if err != nil {
return nil, err
}
c.Stdout = pw
c.closeAfterStart = append(c.closeAfterStart, pw)
c.closeAfterWait = append(c.closeAfterWait, pr)
return pr, nil
}
// StderrPipe returns a pipe that will be connected to the command's
// standard error when the command starts.
//
// Wait will close the pipe after seeing the command exit, so most callers
// need not close the pipe themselves. It is thus incorrect to call Wait
// before all reads from the pipe have completed.
// For the same reason, it is incorrect to use Run when using StderrPipe.
// See the StdoutPipe example for idiomatic usage.
func (c *Cmd) StderrPipe() (io.ReadCloser, error) {
if c.Stderr != nil {
return nil, errors.New("exec: Stderr already set")
}
if c.Process != nil {
return nil, errors.New("exec: StderrPipe after process started")
}
pr, pw, err := os.Pipe()
if err != nil {
return nil, err
}
c.Stderr = pw
c.closeAfterStart = append(c.closeAfterStart, pw)
c.closeAfterWait = append(c.closeAfterWait, pr)
return pr, nil
}
// prefixSuffixSaver is an io.Writer which retains the first N bytes
// and the last N bytes written to it. The Bytes() methods reconstructs
// it with a pretty error message.
type prefixSuffixSaver struct {
N int // max size of prefix or suffix
prefix []byte
suffix []byte // ring buffer once len(suffix) == N
suffixOff int // offset to write into suffix
skipped int64
// TODO(bradfitz): we could keep one large []byte and use part of it for
// the prefix, reserve space for the '... Omitting N bytes ...' message,
// then the ring buffer suffix, and just rearrange the ring buffer
// suffix when Bytes() is called, but it doesn't seem worth it for
// now just for error messages. It's only ~64KB anyway.
}
func (w *prefixSuffixSaver) Write(p []byte) (n int, err error) {
lenp := len(p)
p = w.fill(&w.prefix, p)
// Only keep the last w.N bytes of suffix data.
if overage := len(p) - w.N; overage > 0 {
p = p[overage:]
w.skipped += int64(overage)
}
p = w.fill(&w.suffix, p)
// w.suffix is full now if p is non-empty. Overwrite it in a circle.
for len(p) > 0 { // 0, 1, or 2 iterations.
n := copy(w.suffix[w.suffixOff:], p)
p = p[n:]
w.skipped += int64(n)
w.suffixOff += n
if w.suffixOff == w.N {
w.suffixOff = 0
}
}
return lenp, nil
}
// fill appends up to len(p) bytes of p to *dst, such that *dst does not
// grow larger than w.N. It returns the un-appended suffix of p.
func (w *prefixSuffixSaver) fill(dst *[]byte, p []byte) (pRemain []byte) {
if remain := w.N - len(*dst); remain > 0 {
add := minInt(len(p), remain)
*dst = append(*dst, p[:add]...)
p = p[add:]
}
return p
}
func (w *prefixSuffixSaver) Bytes() []byte {
if w.suffix == nil {
return w.prefix
}
if w.skipped == 0 {
return append(w.prefix, w.suffix...)
}
var buf bytes.Buffer
buf.Grow(len(w.prefix) + len(w.suffix) + 50)
buf.Write(w.prefix)
buf.WriteString("\n... omitting ")
buf.WriteString(strconv.FormatInt(w.skipped, 10))
buf.WriteString(" bytes ...\n")
buf.Write(w.suffix[w.suffixOff:])
buf.Write(w.suffix[:w.suffixOff])
return buf.Bytes()
}
func minInt(a, b int) int {
if a < b {
return a
}
return b
}
// environ returns a best-effort copy of the environment in which the command
// would be run as it is currently configured. If an error occurs in computing
// the environment, it is returned alongside the best-effort copy.
func (c *Cmd) environ() ([]string, error) {
var err error
env := c.Env
if env == nil {
env, err = execenv.Default(c.SysProcAttr)
if err != nil {
env = os.Environ()
// Note that the non-nil err is preserved despite env being overridden.
}
if c.Dir != "" {
switch runtime.GOOS {
case "windows", "plan9":
// Windows and Plan 9 do not use the PWD variable, so we don't need to
// keep it accurate.
default:
// On POSIX platforms, PWD represents “an absolute pathname of the
// current working directory.” Since we are changing the working
// directory for the command, we should also update PWD to reflect that.
//
// Unfortunately, we didn't always do that, so (as proposed in
// https://go.dev/issue/50599) to avoid unintended collateral damage we
// only implicitly update PWD when Env is nil. That way, we're much
// less likely to override an intentional change to the variable.
if pwd, absErr := filepath.Abs(c.Dir); absErr == nil {
env = append(env, "PWD="+pwd)
} else if err == nil {
err = absErr
}
}
}
}
return addCriticalEnv(dedupEnv(env)), err
}
// Environ returns a copy of the environment in which the command would be run
// as it is currently configured.
func (c *Cmd) Environ() []string {
// Intentionally ignore errors: environ returns a best-effort environment no matter what.
env, _ := c.environ()
return env
}
// dedupEnv returns a copy of env with any duplicates removed, in favor of
// later values.
// Items not of the normal environment "key=value" form are preserved unchanged.
func dedupEnv(env []string) []string {
return dedupEnvCase(runtime.GOOS == "windows", env)
}
// dedupEnvCase is dedupEnv with a case option for testing.
// If caseInsensitive is true, the case of keys is ignored.
func dedupEnvCase(caseInsensitive bool, env []string) []string {
// Construct the output in reverse order, to preserve the
// last occurrence of each key.
out := make([]string, 0, len(env))
saw := make(map[string]bool, len(env))
for n := len(env); n > 0; n-- {
kv := env[n-1]
i := strings.Index(kv, "=")
if i == 0 {
// We observe in practice keys with a single leading "=" on Windows.
// TODO(#49886): Should we consume only the first leading "=" as part
// of the key, or parse through arbitrarily many of them until a non-"="?
i = strings.Index(kv[1:], "=") + 1
}
if i < 0 {
if kv != "" {
// The entry is not of the form "key=value" (as it is required to be).
// Leave it as-is for now.
// TODO(#52436): should we strip or reject these bogus entries?
out = append(out, kv)
}
continue
}
k := kv[:i]
if caseInsensitive {
k = strings.ToLower(k)
}
if saw[k] {
continue
}
saw[k] = true
out = append(out, kv)
}
// Now reverse the slice to restore the original order.
for i := 0; i < len(out)/2; i++ {
j := len(out) - i - 1
out[i], out[j] = out[j], out[i]
}
return out
}
// addCriticalEnv adds any critical environment variables that are required
// (or at least almost always required) on the operating system.
// Currently this is only used for Windows.
func addCriticalEnv(env []string) []string {
if runtime.GOOS != "windows" {
return env
}
for _, kv := range env {
k, _, ok := strings.Cut(kv, "=")
if !ok {
continue
}
if strings.EqualFold(k, "SYSTEMROOT") {
// We already have it.
return env
}
}
return append(env, "SYSTEMROOT="+os.Getenv("SYSTEMROOT"))
}
|
[
"\"SYSTEMROOT\""
] |
[] |
[
"SYSTEMROOT"
] |
[]
|
["SYSTEMROOT"]
|
go
| 1 | 0 | |
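The Cmd.Env and Cmd.Environ behavior documented in the file above can be illustrated with a minimal, hypothetical Go sketch (the EXAMPLE_KEY variable and the Unix env command are placeholders for illustration, not part of the package source):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Start from the environment the command would otherwise inherit (the
	// parent's, since Env is nil) and append an override. As documented for
	// Cmd.Env, the last value wins when a key appears more than once.
	cmd := exec.Command("env")
	cmd.Env = append(cmd.Environ(), "EXAMPLE_KEY=value")
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(string(out))
}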
neva/asgi.py
|
"""
ASGI config for neva project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
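# Point Django at the project's settings module unless DJANGO_SETTINGS_MODULE is already set in the environment.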
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'neva.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main // import "github.com/sourcegraph/go-langserver"
import (
"context"
"crypto/tls"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"path/filepath"
"runtime/debug"
"time"
"github.com/keegancsmith/tmpfriend"
"github.com/pkg/errors"
"github.com/sourcegraph/go-langserver/debugserver"
"github.com/sourcegraph/go-langserver/tracer"
"github.com/sourcegraph/go-langserver/vfsutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/gorilla/websocket"
"github.com/sourcegraph/go-langserver/buildserver"
"github.com/sourcegraph/go-langserver/langserver"
"github.com/sourcegraph/jsonrpc2"
wsjsonrpc2 "github.com/sourcegraph/jsonrpc2/websocket"
_ "net/http/pprof"
)
var (
mode = flag.String("mode", "stdio", "communication mode (stdio|tcp|websocket)")
addr = flag.String("addr", ":4389", "server listen address (tcp or websocket)")
trace = flag.Bool("trace", false, "print all requests and responses")
logfile = flag.String("logfile", "", "also log to this file (in addition to stderr)")
printVersion = flag.Bool("version", false, "print version and exit")
pprof = flag.String("pprof", "", "start a pprof http server (https://golang.org/pkg/net/http/pprof/)")
freeosmemory = flag.Bool("freeosmemory", true, "aggressively free memory back to the OS")
useBuildServer = flag.Bool("usebuildserver", false, "use a build server to fetch dependencies, fetch files via Zip URL, etc.")
cacheDir = flag.String("cachedir", "/tmp", "directory to store cached archives")
// Default Config, can be overridden by InitializationOptions
usebinarypkgcache = flag.Bool("usebinarypkgcache", true, "use $GOPATH/pkg binary .a files (improves performance). Can be overridden by InitializationOptions.")
maxparallelism = flag.Int("maxparallelism", 0, "use at max N parallel goroutines to fulfill requests. Can be overridden by InitializationOptions.")
gocodecompletion = flag.Bool("gocodecompletion", false, "enable completion (extra memory burden). Can be overridden by InitializationOptions.")
diagnostics = flag.Bool("diagnostics", false, "enable diagnostics (extra memory burden). Can be overridden by InitializationOptions.")
funcSnippetEnabled = flag.Bool("func-snippet-enabled", true, "enable argument snippets on func completion. Can be overridden by InitializationOptions.")
formatTool = flag.String("format-tool", "goimports", "which tool is used to format documents. Supported: goimports and gofmt. Can be overridden by InitializationOptions.")
lintTool = flag.String("lint-tool", "none", "which tool is used to linting. Supported: none and golint. Can be overridden by InitializationOptions.")
openGauge = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "golangserver",
Subsystem: "build",
Name: "open_connections",
Help: "Number of open connections to the language server.",
})
)
func init() {
prometheus.MustRegister(openGauge)
}
// version is the version field we report back. If you are releasing a new version:
// 1. Create commit without -dev suffix.
// 2. Create commit with version incremented and -dev suffix
// 3. Push to master
// 4. Tag the commit created in (1) with the value of the version string
const version = "v3-dev"
func main() {
flag.Parse()
log.SetFlags(0)
vfsutil.ArchiveCacheDir = filepath.Join(*cacheDir, "lang-go-archive-cache")
// Start pprof server, if desired.
if *pprof != "" {
go func() {
log.Println(http.ListenAndServe(*pprof, nil))
}()
}
if *freeosmemory {
go freeOSMemory()
}
cfg := langserver.NewDefaultConfig()
cfg.FuncSnippetEnabled = *funcSnippetEnabled
cfg.GocodeCompletionEnabled = *gocodecompletion
cfg.DiagnosticsEnabled = *diagnostics
cfg.UseBinaryPkgCache = *usebinarypkgcache
cfg.FormatTool = *formatTool
cfg.LintTool = *lintTool
if *maxparallelism > 0 {
cfg.MaxParallelism = *maxparallelism
}
if err := run(cfg); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func run(cfg langserver.Config) error {
tracer.Init()
go debugserver.Start()
cleanup := tmpfriend.SetupOrNOOP()
defer cleanup()
if *useBuildServer {
// If go-langserver crashes, all the archives it has cached are not
// evicted. Over time this leads to us filling up the disk. This is a
// simple fix where we do a best-effort purge of the cache.
// https://github.com/sourcegraph/sourcegraph/issues/6090
_ = os.RemoveAll(vfsutil.ArchiveCacheDir)
// PERF: Hide latency of fetching golang/go from the first typecheck
go buildserver.FetchCommonDeps()
}
listen := func(addr string) (*net.Listener, error) {
listener, err := net.Listen("tcp", addr)
if err != nil {
log.Fatalf("Could not bind to address %s: %v", addr, err)
return nil, err
}
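// If both the TLS_CERT and TLS_KEY environment variables are set, wrap the listener in TLS using that certificate/key pair.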
if os.Getenv("TLS_CERT") != "" && os.Getenv("TLS_KEY") != "" {
cert, err := tls.X509KeyPair([]byte(os.Getenv("TLS_CERT")), []byte(os.Getenv("TLS_KEY")))
if err != nil {
return nil, err
}
listener = tls.NewListener(listener, &tls.Config{
Certificates: []tls.Certificate{cert},
})
}
return &listener, nil
}
if *printVersion {
fmt.Println(version)
return nil
}
var logW io.Writer
if *logfile == "" {
logW = os.Stderr
} else {
f, err := os.Create(*logfile)
if err != nil {
return err
}
defer f.Close()
logW = io.MultiWriter(os.Stderr, f)
}
log.SetOutput(logW)
var connOpt []jsonrpc2.ConnOpt
if *trace {
connOpt = append(connOpt, jsonrpc2.LogMessages(log.New(logW, "", 0)))
}
newHandler := func() jsonrpc2.Handler {
if *useBuildServer {
return jsonrpc2.AsyncHandler(buildserver.NewHandler(cfg))
}
return langserver.NewHandler(cfg)
}
switch *mode {
case "tcp":
lis, err := listen(*addr)
if err != nil {
return err
}
defer (*lis).Close()
log.Println("langserver-go: listening for TCP connections on", *addr)
connectionCount := 0
for {
conn, err := (*lis).Accept()
if err != nil {
return err
}
connectionCount = connectionCount + 1
connectionID := connectionCount
log.Printf("langserver-go: received incoming connection #%d\n", connectionID)
openGauge.Inc()
jsonrpc2Connection := jsonrpc2.NewConn(context.Background(), jsonrpc2.NewBufferedStream(conn, jsonrpc2.VSCodeObjectCodec{}), newHandler(), connOpt...)
go func() {
<-jsonrpc2Connection.DisconnectNotify()
log.Printf("langserver-go: connection #%d closed\n", connectionID)
openGauge.Dec()
}()
}
case "websocket":
mux := http.NewServeMux()
upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }}
connectionCount := 0
mux.HandleFunc("/", func(w http.ResponseWriter, request *http.Request) {
connection, err := upgrader.Upgrade(w, request, nil)
if err != nil {
log.Println("error upgrading HTTP to WebSocket:", err)
http.Error(w, errors.Wrap(err, "could not upgrade to WebSocket").Error(), http.StatusBadRequest)
return
}
defer connection.Close()
connectionCount = connectionCount + 1
connectionID := connectionCount
openGauge.Inc()
log.Printf("langserver-go: received incoming connection #%d\n", connectionID)
<-jsonrpc2.NewConn(context.Background(), wsjsonrpc2.NewObjectStream(connection), newHandler(), connOpt...).DisconnectNotify()
log.Printf("langserver-go: connection #%d closed\n", connectionID)
openGauge.Dec()
})
l, err := listen(*addr)
if err != nil {
log.Println(err)
return err
}
server := &http.Server{
Handler: mux,
ReadTimeout: 75 * time.Second,
WriteTimeout: 60 * time.Second,
}
log.Println("langserver-go: listening for WebSocket connections on", *addr)
err = server.Serve(*l)
log.Println(errors.Wrap(err, "HTTP server"))
return err
case "stdio":
log.Println("langserver-go: reading on stdin, writing on stdout")
<-jsonrpc2.NewConn(context.Background(), jsonrpc2.NewBufferedStream(stdrwc{}, jsonrpc2.VSCodeObjectCodec{}), newHandler(), connOpt...).DisconnectNotify()
log.Println("connection closed")
return nil
default:
return fmt.Errorf("invalid mode %q", *mode)
}
}
type stdrwc struct{}
func (stdrwc) Read(p []byte) (int, error) {
return os.Stdin.Read(p)
}
func (stdrwc) Write(p []byte) (int, error) {
return os.Stdout.Write(p)
}
func (stdrwc) Close() error {
if err := os.Stdin.Close(); err != nil {
return err
}
return os.Stdout.Close()
}
// freeOSMemory should be called in a goroutine, it invokes
// runtime/debug.FreeOSMemory() more aggressively than the runtime default of
// 5 minutes after GC.
//
// There is a long-standing known issue with Go in which memory is not returned
// to the OS aggressively enough[1], which coincidently harms our application
// quite a lot because we perform so many short-burst heap allocations during
// the type-checking phase.
//
// This function should only be invoked in editor mode, not in sourcegraph.com
// mode, because users running the language server as part of their editor
// generally expect much lower memory usage. In contrast, on sourcegraph.com we
// can give our servers plenty of RAM and allow Go to consume as much as it
// wants. Go does reuse the memory not free'd to the OS, and as such enabling
// this does _technically_ make our application perform less optimally -- but
// in practice this has no observable effect in editor mode.
//
// The end effect of performing this is that repeating "hover over code" -> "make an edit"
// 10 times inside a large package like github.com/docker/docker/cmd/dockerd:
//
//
// | Real Before | Real After | Real Change | Go Before | Go After | Go Change |
// |-------------|------------|-------------|-----------|----------|-----------|
// | 7.61GB | 4.12GB | -45.86% | 3.92GB | 3.33GB | -15.05% |
//
// Where `Real` means real memory reported by OS X Activity Monitor, and `Go`
// means memory reported by Go as being in use.
//
// TL;DR: 46% less memory consumption for users running with the vscode-go extension.
//
// [1] https://github.com/golang/go/issues/14735#issuecomment-194470114
func freeOSMemory() {
for {
time.Sleep(1 * time.Second)
debug.FreeOSMemory()
}
}
|
[
"\"TLS_CERT\"",
"\"TLS_KEY\"",
"\"TLS_CERT\"",
"\"TLS_KEY\""
] |
[] |
[
"TLS_CERT",
"TLS_KEY"
] |
[]
|
["TLS_CERT", "TLS_KEY"]
|
go
| 2 | 0 | |
test/integration/conftest.py
|
import logging
import os
import re
import subprocess
import uuid
import pytest
logger = logging.getLogger(__name__)
TAG_PREFIX = 'quay.io/example/builder-test'
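# Setting the KEEP_IMAGES environment variable to any non-empty value skips image cleanup after the tests.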
KEEP_IMAGES = bool(os.environ.get('KEEP_IMAGES', False))
@pytest.fixture
def build_dir_and_ee_yml(tmp_path):
"""Fixture to return temporary file maker."""
def tmp_dir_and_file(ee_contents):
tmp_file = tmp_path / 'ee.txt'
tmp_file.write_text(ee_contents)
return tmp_path, tmp_file
return tmp_dir_and_file
def run(args, *a, allow_error=False, **kw):
kw["encoding"] = "utf-8"
if "check" not in kw:
# By default we want to fail if a command fails to run. Tests that
# want to skip this can pass check=False when calling this fixture
kw["check"] = True
if "stdout" not in kw:
kw["stdout"] = subprocess.PIPE
if "stderr" not in kw:
kw["stderr"] = subprocess.PIPE
kw.setdefault("env", os.environ.copy()).update({"LANG": "en_US.UTF-8"})
for i, arg in enumerate(args):
if not isinstance(arg, str):
raise pytest.fail(
f'Argument {arg} in {i} position is not string, args:\n{args}'
)
try:
ret = CompletedProcessProxy(subprocess.run(args, shell=True, *a, **kw))
except subprocess.CalledProcessError as err:
if not allow_error:
# Previously used pytest.fail here, but that missed some error details
print(f"Running following command resulted in a non-zero return code: {err.returncode}")
print(err.cmd)
print('stdout:')
print(err.stdout)
print('stderr:')
print(err.stderr)
raise
err.rc = err.returncode # lazily make it look like a CompletedProcessProxy
return err
ret.rc = ret.result.returncode
return ret
def gen_image_name(request):
return '_'.join([
TAG_PREFIX,
request.node.name.lower().replace('[', '_').replace(']', '_'),
str(uuid.uuid4())[:10]
])
@pytest.mark.test_all_runtimes
def delete_image(runtime, image_name):
if KEEP_IMAGES:
return
# delete given image, if the test happened to make one
# allow error in case that image was not created
regexp = re.compile(r'(no such image)|(image not known)|(image is in use by a container)', re.IGNORECASE)
r = run(f'{runtime} rmi -f {image_name}', allow_error=True)
if r.rc != 0:
if regexp.search(r.stdout) or regexp.search(r.stderr):
return
else:
raise Exception(f'Teardown failed (rc={r.rc}):\n{r.stdout}\n{r.stderr}')
@pytest.fixture
@pytest.mark.test_all_runtimes
def ee_tag(request, runtime):
image_name = gen_image_name(request)
yield image_name
delete_image(runtime, image_name)
class CompletedProcessProxy(object):
def __init__(self, result):
self.result = result
def __getattr__(self, attr):
return getattr(self.result, attr)
@pytest.fixture
def cli():
return run
|
[] |
[] |
[
"KEEP_IMAGES"
] |
[]
|
["KEEP_IMAGES"]
|
python
| 1 | 0 | |
classification_ModelNet40/test.py
|
"""
python test.py --model pointMLP --msg 20220209053148-404
"""
import argparse
import os
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
import models as models
from utils import progress_bar, IOStream
from data import ModelNet40
import sklearn.metrics as metrics
from helper import cal_loss
import numpy as np
import torch.nn.functional as F
model_names = sorted(
name for name in models.__dict__ if callable(models.__dict__[name])
)
def parse_args():
"""Parameters"""
parser = argparse.ArgumentParser("training")
parser.add_argument(
"-c",
"--checkpoint",
type=str,
metavar="PATH",
help="path to save checkpoint (default: checkpoint)",
)
parser.add_argument("--msg", type=str, help="message after checkpoint")
parser.add_argument(
"--batch_size", type=int, default=16, help="batch size in training"
)
parser.add_argument(
"--model", default="pointMLP", help="model name [default: pointnet_cls]"
)
parser.add_argument(
"--num_classes",
default=40,
type=int,
choices=[10, 40],
help="training on ModelNet10/40",
)
parser.add_argument("--num_points", type=int, default=1024, help="Point Number")
return parser.parse_args()
def main():
args = parse_args()
print(f"args: {args}")
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print(f"==> Using device: {device}")
if args.msg is None:
message = str(datetime.datetime.now().strftime("-%Y%m%d%H%M%S"))
else:
message = "-" + args.msg
args.checkpoint = "checkpoints/" + args.model + message
print("==> Preparing data..")
test_loader = DataLoader(
ModelNet40(partition="test", num_points=args.num_points),
num_workers=4,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
)
# Model
print("==> Building model..")
net = models.__dict__[args.model]()
criterion = cal_loss
net = net.to(device)
checkpoint_path = os.path.join(args.checkpoint, "best_checkpoint.pth")
checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
# criterion = criterion.to(device)
if device == "cuda":
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(checkpoint["net"])
test_out = validate(net, test_loader, criterion, device)
print(f"Vanilla out: {test_out}")
def validate(net, testloader, criterion, device):
net.eval()
test_loss = 0
correct = 0
total = 0
test_true = []
test_pred = []
time_cost = datetime.datetime.now()
with torch.no_grad():
for batch_idx, (data, label) in enumerate(testloader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = net(data)
loss = criterion(logits, label)
test_loss += loss.item()
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
total += label.size(0)
correct += preds.eq(label).sum().item()
progress_bar(
batch_idx,
len(testloader),
"Loss: %.3f | Acc: %.3f%% (%d/%d)"
% (
test_loss / (batch_idx + 1),
100.0 * correct / total,
correct,
total,
),
)
time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
return {
"loss": float("%.3f" % (test_loss / (batch_idx + 1))),
"acc": float("%.3f" % (100.0 * metrics.accuracy_score(test_true, test_pred))),
"acc_avg": float(
"%.3f" % (100.0 * metrics.balanced_accuracy_score(test_true, test_pred))
),
"time": time_cost,
}
if __name__ == "__main__":
main()
|
[] |
[] |
[
"HDF5_USE_FILE_LOCKING"
] |
[]
|
["HDF5_USE_FILE_LOCKING"]
|
python
| 1 | 0 | |
cmd/static/main.go
|
package main
import (
"errors"
"github.com/clnbs/autorace/internal/pkg/messaging"
"os"
"os/signal"
"syscall"
"time"
"github.com/clnbs/autorace/internal/app/server"
"github.com/clnbs/autorace/pkg/logger"
)
var (
hitCounter = 5
rabbitMQConfig messaging.RabbitConnectionConfiguration
)
func init() {
logger.SetStdLogger("trace", "stdout")
var err error
err = errors.New("dummy")
index := 0
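// Retry the fluentd logger connection up to hitCounter times, sleeping 5 seconds between attempts.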
for err != nil && index < hitCounter {
_, err = logger.SetFluentLogger("fluentd", "trace", "dynamic", 24224)
if err != nil {
time.Sleep(5 * time.Second)
}
index++
}
if err != nil {
panic(err)
}
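// RabbitMQ connection settings come from the RABBITMQ_HOST, RABBITMQ_PORT, RABBITMQ_USER and RABBITMQ_PASS environment variables.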
rabbitMQConfig = messaging.RabbitConnectionConfiguration{
Host: os.Getenv("RABBITMQ_HOST"),
Port: os.Getenv("RABBITMQ_PORT"),
User: os.Getenv("RABBITMQ_USER"),
Password: os.Getenv("RABBITMQ_PASS"),
}
}
func main() {
stop := make(chan os.Signal)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
readyToReceive := make(chan bool)
srvr, err := server.NewStaticServer(rabbitMQConfig)
if err != nil {
logger.Error("error while creation server :", err)
return
}
logger.Trace("server created")
go func() {
err := srvr.ReceivePlayerCreation(readyToReceive)
if err != nil {
logger.Error("while listening to player creation :", err)
return
}
}()
if !<-readyToReceive {
logger.Error("server could not listen continuously")
return
}
go func() {
err := srvr.PartyListRequest(readyToReceive)
if err != nil {
logger.Error("while listening to party list request :", err)
return
}
logger.Trace("end of ReceivePartyCreation")
}()
if !<-readyToReceive {
logger.Error("server could not listen continuously")
return
}
go func() {
err := srvr.ReceivePartyCreation(readyToReceive)
if err != nil {
logger.Error("while listening to party creation :", err)
return
}
logger.Trace("end of PartyListRequest")
}()
if !<-readyToReceive {
logger.Error("server could not listen continuously")
return
}
logger.Trace("static server started ...")
<-stop
err = srvr.Close()
if err != nil {
logger.Error("while closing ongoing connection :", err)
}
}
|
[
"\"RABBITMQ_HOST\"",
"\"RABBITMQ_PORT\"",
"\"RABBITMQ_USER\"",
"\"RABBITMQ_PASS\""
] |
[] |
[
"RABBITMQ_USER",
"RABBITMQ_HOST",
"RABBITMQ_PORT",
"RABBITMQ_PASS"
] |
[]
|
["RABBITMQ_USER", "RABBITMQ_HOST", "RABBITMQ_PORT", "RABBITMQ_PASS"]
|
go
| 4 | 0 | |
cmd/detectExecuteScan_generated.go
|
// Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type detectExecuteScanOptions struct {
APIToken string `json:"apiToken,omitempty"`
CodeLocation string `json:"codeLocation,omitempty"`
ProjectName string `json:"projectName,omitempty"`
ProjectVersion string `json:"projectVersion,omitempty"`
Scanners []string `json:"scanners,omitempty"`
ScanPaths []string `json:"scanPaths,omitempty"`
ScanProperties []string `json:"scanProperties,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
}
// DetectExecuteScanCommand Executes Synopsis Detect scan
func DetectExecuteScanCommand() *cobra.Command {
metadata := detectExecuteScanMetadata()
var stepConfig detectExecuteScanOptions
var startTime time.Time
var createDetectExecuteScanCmd = &cobra.Command{
Use: "detectExecuteScan",
Short: "Executes Synopsis Detect scan",
Long: `This step executes [Synopsis Detect](https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/62423113/Synopsys+Detect) scans.`,
PreRunE: func(cmd *cobra.Command, args []string) error {
startTime = time.Now()
log.SetStepName("detectExecuteScan")
log.SetVerbose(GeneralConfig.Verbose)
return PrepareConfig(cmd, &metadata, "detectExecuteScan", &stepConfig, config.OpenPiperFile)
},
Run: func(cmd *cobra.Command, args []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, "detectExecuteScan")
detectExecuteScan(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
},
}
addDetectExecuteScanFlags(createDetectExecuteScanCmd, &stepConfig)
return createDetectExecuteScanCmd
}
func addDetectExecuteScanFlags(cmd *cobra.Command, stepConfig *detectExecuteScanOptions) {
cmd.Flags().StringVar(&stepConfig.APIToken, "apiToken", os.Getenv("PIPER_apiToken"), "Api token to be used for connectivity with Synopsis Detect server.")
cmd.Flags().StringVar(&stepConfig.CodeLocation, "codeLocation", os.Getenv("PIPER_codeLocation"), "An override for the name Detect will use for the scan file it creates.")
cmd.Flags().StringVar(&stepConfig.ProjectName, "projectName", os.Getenv("PIPER_projectName"), "Name of the Synopsis Detect (formerly BlackDuck) project.")
cmd.Flags().StringVar(&stepConfig.ProjectVersion, "projectVersion", os.Getenv("PIPER_projectVersion"), "Version of the Synopsis Detect (formerly BlackDuck) project.")
cmd.Flags().StringSliceVar(&stepConfig.Scanners, "scanners", []string{"signature"}, "List of scanners to be used for Synopsis Detect (formerly BlackDuck) scan.")
cmd.Flags().StringSliceVar(&stepConfig.ScanPaths, "scanPaths", []string{"."}, "List of paths which should be scanned by the Synopsis Detect (formerly BlackDuck) scan.")
cmd.Flags().StringSliceVar(&stepConfig.ScanProperties, "scanProperties", []string{"--blackduck.signature.scanner.memory=4096", "--blackduck.timeout=6000", "--blackduck.trust.cert=true", "--detect.policy.check.fail.on.severities=BLOCKER,CRITICAL,MAJOR", "--detect.report.timeout=4800", "--logging.level.com.synopsys.integration=DEBUG"}, "Properties passed to the Synopsis Detect (formerly BlackDuck) scan. You can find details in the [Synopsis Detect documentation](https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/622846/Using+Synopsys+Detect+Properties)")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", os.Getenv("PIPER_serverUrl"), "Server url to the Synopsis Detect (formerly BlackDuck) Server.")
cmd.MarkFlagRequired("apiToken")
cmd.MarkFlagRequired("projectName")
cmd.MarkFlagRequired("projectVersion")
}
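// Note (clarifying comment): the string flags above default to the corresponding PIPER_*
// environment variables, which are read when the flags are registered. Exporting e.g.
// PIPER_apiToken is therefore equivalent to passing --apiToken on the command line, and an
// explicitly supplied flag overrides the environment default.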
// retrieve step metadata
func detectExecuteScanMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "detectExecuteScan",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "apiToken",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "detect/apiToken"}},
},
{
Name: "codeLocation",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "projectName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "detect/projectName"}},
},
{
Name: "projectVersion",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "detect/projectVersion"}},
},
{
Name: "scanners",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/scanners"}},
},
{
Name: "scanPaths",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/scanPaths"}},
},
{
Name: "scanProperties",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/scanProperties"}},
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "detect/serverUrl"}},
},
},
},
},
}
return theMetaData
}
|
[
"\"PIPER_apiToken\"",
"\"PIPER_codeLocation\"",
"\"PIPER_projectName\"",
"\"PIPER_projectVersion\"",
"\"PIPER_serverUrl\""
] |
[] |
[
"PIPER_codeLocation",
"PIPER_serverUrl",
"PIPER_projectName",
"PIPER_apiToken",
"PIPER_projectVersion"
] |
[]
|
["PIPER_codeLocation", "PIPER_serverUrl", "PIPER_projectName", "PIPER_apiToken", "PIPER_projectVersion"]
|
go
| 5 | 0 | |
utils.go
|
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/gabriel-vasile/mimetype"
)
type SongLink struct {
Link string
Title string
}
type Markers struct {
Title string
SongPath string
SongType string
FilesLinks []SongLink
Username string
Password string
Authenticated bool
}
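// checkCredentials compares the supplied credentials against the values of the
// `username` and `password` environment variables.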
func checkCredentials(username, password string) bool {
return (username == os.Getenv("username") && password == os.Getenv("password"))
}
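// loadDirectoryTree walks root recursively and returns every path it encounters
// (directories included); non-audio entries are filtered out later in makeSongsLink.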
func loadDirectoryTree(root string) []string {
files := make([]string, 0, 1)
filepath.Walk(
root,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
files = append(files, path)
return nil
},
)
return files
}
func makeSongsLink(files []string, linkTpl string) []SongLink {
// Strip the `/music/` prefix from each path and keep only audio files
audioFiles := make([]SongLink, 0)
for _, path := range files {
mt, err := mimetype.DetectFile(path)
mime := strings.Split(mt.String(), "/")[0] // General type of file
// There is no error and it's an audio file
if err == nil && mime == "audio" {
// Build the song url link
audioTitle := path[len("/music/"):]
audioLink := fmt.Sprintf(linkTpl, audioTitle)
songLink := SongLink{Title: audioTitle, Link: audioLink}
audioFiles = append(audioFiles, songLink)
}
}
return audioFiles
}
func findSong(song string) (title string, songType string, songPath string) {
if song == "" { // The song parameter isn't present
title = "No song playing"
} else { // The song parameter is present
songPath = fmt.Sprintf("/music/%s", song)
// Check whether we can access the file
if _, err := os.Stat(songPath); err != nil { // We cannot access the file
title = "Error finding file"
fmt.Println(err)
} else { // We can access the file
// get the mime type of the song
var mimeType string
if mime, err := mimetype.DetectFile(songPath); err == nil {
mimeType = mime.String()
} else {
fmt.Println(err)
mimeType = "application/octet-stream"
}
title = song
songType = mimeType
}
}
return title, songType, songPath
}
|
[
"\"username\"",
"\"password\""
] |
[] |
[
"username",
"password"
] |
[]
|
["username", "password"]
|
go
| 2 | 0 | |
src/cmd_local/go/internal/modfetch/codehost/codehost.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package codehost defines the interface implemented by a code hosting source,
// along with support code for use by implementations.
package codehost
import (
"bytes"
"crypto/sha256"
"fmt"
exec "internal_local/execabs"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"time"
"cmd_local/go/internal/cfg"
"cmd_local/go/internal/lockedfile"
"cmd_local/go/internal/str"
)
// Downloaded size limits.
const (
MaxGoMod = 16 << 20 // maximum size of go.mod file
MaxLICENSE = 16 << 20 // maximum size of LICENSE file
MaxZipFile = 500 << 20 // maximum size of downloaded zip file
)
// A Repo represents a code hosting source.
// Typical implementations include local version control repositories,
// remote version control servers, and code hosting sites.
// A Repo must be safe for simultaneous use by multiple goroutines.
type Repo interface {
// Tags lists all tags with the given prefix.
Tags(prefix string) (tags []string, err error)
// Stat returns information about the revision rev.
// A revision can be any identifier known to the underlying service:
// commit hash, branch, tag, and so on.
Stat(rev string) (*RevInfo, error)
// Latest returns the latest revision on the default branch,
// whatever that means in the underlying implementation.
Latest() (*RevInfo, error)
// ReadFile reads the given file in the file tree corresponding to revision rev.
// It should refuse to read more than maxSize bytes.
//
// If the requested file does not exist it should return an error for which
// os.IsNotExist(err) returns true.
ReadFile(rev, file string, maxSize int64) (data []byte, err error)
// ReadFileRevs reads a single file at multiple versions.
// It should refuse to read more than maxSize bytes.
// The result is a map from each requested rev string
// to the associated FileRev. The map must have a non-nil
// entry for every requested rev (unless ReadFileRevs returned an error).
// A file simply being missing or even corrupted in revs[i]
// should be reported only in files[revs[i]].Err, not in the error result
// from ReadFileRevs.
// The overall call should return an error (and no map) only
// in the case of a problem with obtaining the data, such as
// a network failure.
// Implementations may assume that revs only contain tags,
// not direct commit hashes.
ReadFileRevs(revs []string, file string, maxSize int64) (files map[string]*FileRev, err error)
// ReadZip downloads a zip file for the subdir subdirectory
// of the given revision to a new file in a given temporary directory.
// It should refuse to read more than maxSize bytes.
// It returns a ReadCloser for a streamed copy of the zip file.
// All files in the zip file are expected to be
// nested in a single top-level directory, whose name is not specified.
ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, err error)
// RecentTag returns the most recent tag on rev or one of its predecessors
// with the given prefix. allowed may be used to filter out unwanted versions.
RecentTag(rev, prefix string, allowed func(string) bool) (tag string, err error)
// DescendsFrom reports whether rev or any of its ancestors has the given tag.
//
// DescendsFrom must return true for any tag returned by RecentTag for the
// same revision.
DescendsFrom(rev, tag string) (bool, error)
}
// A RevInfo describes a single revision in a source code repository.
type RevInfo struct {
Name string // complete ID in underlying repository
Short string // shortened ID, for use in pseudo-version
Version string // version used in lookup
Time time.Time // commit time
Tags []string // known tags for commit
}
// A FileRev describes the result of reading a file at a given revision.
type FileRev struct {
Rev string // requested revision
Data []byte // file data
Err error // error if any; os.IsNotExist(Err)==true if rev exists but file does not exist in that rev
}
// UnknownRevisionError is an error equivalent to fs.ErrNotExist, but for a
// revision rather than a file.
type UnknownRevisionError struct {
Rev string
}
func (e *UnknownRevisionError) Error() string {
return "unknown revision " + e.Rev
}
func (UnknownRevisionError) Is(err error) bool {
return err == fs.ErrNotExist
}
// ErrNoCommits is an error equivalent to fs.ErrNotExist indicating that a given
// repository or module contains no commits.
var ErrNoCommits error = noCommitsError{}
type noCommitsError struct{}
func (noCommitsError) Error() string {
return "no commits"
}
func (noCommitsError) Is(err error) bool {
return err == fs.ErrNotExist
}
// AllHex reports whether the revision rev is entirely lower-case hexadecimal digits.
func AllHex(rev string) bool {
for i := 0; i < len(rev); i++ {
c := rev[i]
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' {
continue
}
return false
}
return true
}
// ShortenSHA1 shortens a SHA1 hash (40 hex digits) to the canonical length
// used in pseudo-versions (12 hex digits).
func ShortenSHA1(rev string) string {
if AllHex(rev) && len(rev) == 40 {
return rev[:12]
}
return rev
}
// WorkDir returns the name of the cached work directory to use for the
// given repository type and name.
func WorkDir(typ, name string) (dir, lockfile string, err error) {
if cfg.GOMODCACHE == "" {
return "", "", fmt.Errorf("neither GOPATH nor GOMODCACHE are set")
}
// We name the work directory for the SHA256 hash of the type and name.
// We intentionally avoid the actual name both because of possible
// conflicts with valid file system paths and because we want to ensure
// that one checkout is never nested inside another. That nesting has
// led to security problems in the past.
if strings.Contains(typ, ":") {
return "", "", fmt.Errorf("codehost.WorkDir: type cannot contain colon")
}
key := typ + ":" + name
dir = filepath.Join(cfg.GOMODCACHE, "cache/vcs", fmt.Sprintf("%x", sha256.Sum256([]byte(key))))
if cfg.BuildX {
fmt.Fprintf(os.Stderr, "mkdir -p %s # %s %s\n", filepath.Dir(dir), typ, name)
}
if err := os.MkdirAll(filepath.Dir(dir), 0777); err != nil {
return "", "", err
}
lockfile = dir + ".lock"
if cfg.BuildX {
fmt.Fprintf(os.Stderr, "# lock %s", lockfile)
}
unlock, err := lockedfile.MutexAt(lockfile).Lock()
if err != nil {
return "", "", fmt.Errorf("codehost.WorkDir: can't find or create lock file: %v", err)
}
defer unlock()
data, err := os.ReadFile(dir + ".info")
info, err2 := os.Stat(dir)
if err == nil && err2 == nil && info.IsDir() {
// Info file and directory both already exist: reuse.
have := strings.TrimSuffix(string(data), "\n")
if have != key {
return "", "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key)
}
if cfg.BuildX {
fmt.Fprintf(os.Stderr, "# %s for %s %s\n", dir, typ, name)
}
return dir, lockfile, nil
}
// Info file or directory missing. Start from scratch.
if cfg.BuildX {
fmt.Fprintf(os.Stderr, "mkdir -p %s # %s %s\n", dir, typ, name)
}
os.RemoveAll(dir)
if err := os.MkdirAll(dir, 0777); err != nil {
return "", "", err
}
if err := os.WriteFile(dir+".info", []byte(key), 0666); err != nil {
os.RemoveAll(dir)
return "", "", err
}
return dir, lockfile, nil
}
type RunError struct {
Cmd string
Err error
Stderr []byte
HelpText string
}
func (e *RunError) Error() string {
text := e.Cmd + ": " + e.Err.Error()
stderr := bytes.TrimRight(e.Stderr, "\n")
if len(stderr) > 0 {
text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t")
}
if len(e.HelpText) > 0 {
text += "\n" + e.HelpText
}
return text
}
var dirLock sync.Map
// Run runs the command line in the given directory
// (an empty dir means the current directory).
// It returns the standard output and, for a non-zero exit,
// a *RunError indicating the command, exit status, and standard error.
// Standard error is unavailable for commands that exit successfully.
func Run(dir string, cmdline ...interface{}) ([]byte, error) {
return RunWithStdin(dir, nil, cmdline...)
}
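// Example (illustrative sketch): a VCS backend would call this helper roughly like
//
//	out, err := Run(workDir, "git", "ls-remote", "-q", remoteURL)
//
// where workDir and remoteURL are hypothetical placeholders for the repository's cached
// work directory and its remote URL.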
// bashQuoter escapes characters that have special meaning in double-quoted strings in the bash shell.
// See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html.
var bashQuoter = strings.NewReplacer(`"`, `\"`, `$`, `\$`, "`", "\\`", `\`, `\\`)
func RunWithStdin(dir string, stdin io.Reader, cmdline ...interface{}) ([]byte, error) {
if dir != "" {
muIface, ok := dirLock.Load(dir)
if !ok {
muIface, _ = dirLock.LoadOrStore(dir, new(sync.Mutex))
}
mu := muIface.(*sync.Mutex)
mu.Lock()
defer mu.Unlock()
}
cmd := str.StringList(cmdline...)
if os.Getenv("TESTGOVCS") == "panic" {
panic(fmt.Sprintf("use of vcs: %v", cmd))
}
if cfg.BuildX {
text := new(strings.Builder)
if dir != "" {
text.WriteString("cd ")
text.WriteString(dir)
text.WriteString("; ")
}
for i, arg := range cmd {
if i > 0 {
text.WriteByte(' ')
}
switch {
case strings.ContainsAny(arg, "'"):
// Quote args that could be mistaken for quoted args.
text.WriteByte('"')
text.WriteString(bashQuoter.Replace(arg))
text.WriteByte('"')
case strings.ContainsAny(arg, "$`\\*?[\"\t\n\v\f\r \u0085\u00a0"):
// Quote args that contain special characters, glob patterns, or spaces.
text.WriteByte('\'')
text.WriteString(arg)
text.WriteByte('\'')
default:
text.WriteString(arg)
}
}
fmt.Fprintf(os.Stderr, "%s\n", text)
start := time.Now()
defer func() {
fmt.Fprintf(os.Stderr, "%.3fs # %s\n", time.Since(start).Seconds(), text)
}()
}
// TODO: Impose limits on command output size.
// TODO: Set environment to get English error messages.
var stderr bytes.Buffer
var stdout bytes.Buffer
c := exec.Command(cmd[0], cmd[1:]...)
c.Dir = dir
c.Stdin = stdin
c.Stderr = &stderr
c.Stdout = &stdout
err := c.Run()
if err != nil {
err = &RunError{Cmd: strings.Join(cmd, " ") + " in " + dir, Stderr: stderr.Bytes(), Err: err}
}
return stdout.Bytes(), err
}
|
[
"\"TESTGOVCS\""
] |
[] |
[
"TESTGOVCS"
] |
[]
|
["TESTGOVCS"]
|
go
| 1 | 0 | |
hshell/main.go
|
package main
// Creates a client and uses it to call a protobuf service endpoint.
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"github.com/hailo-platform/H2O/hshell/login"
"github.com/hailo-platform/H2O/hshell/parseprotobuf"
"github.com/hailo-platform/H2O/platform/client"
"github.com/cihub/seelog"
gouuid "github.com/nu7hatch/gouuid"
"log"
"os"
"os/exec"
"strings"
"time"
)
var tmpDir string
var setGoPath string
var latestTrace string
func main() {
startCli := flag.Bool("cli", true, "starts the command line interface")
tempGopath := flag.Bool("tempgopath", false, "use a temporary gopath or not")
printTempDir := flag.Bool("printtempdir", false, "prints the location of the temporary directory")
nuke := flag.Bool("nuke", false, "deletes the temp directory and redownloads everything")
timeout := flag.String("timeout", "5000", "default timeout in hshell")
retries := flag.Int("retries", 0, "default number of retries in hshell")
live := flag.Bool("live", false, "If true this will connect to the live environment via the bastion")
staging := flag.Bool("staging", false, "If true this will connect to the staging environment via the bastion")
bastion := flag.String("bastion", "", "The address of the bastion server to connect through")
bastionUser := flag.String("bastionUser", "", "bastion username")
env := flag.String("env", "", "The environment name you want to connect to (example: tst)")
region := flag.String("region", "eu-west-1", "The region name you want to connect to (example: eu-west-1)")
flag.Parse()
var err error
// sets GOPATH to a temporary workspace when requested
if *tempGopath || *printTempDir {
tmpDir, err = CreateTempDir(*nuke) //creates a temporary workspace
if err != nil {
log.Println("error creating tempdir", err)
}
if *printTempDir {
fmt.Println(tmpDir)
return
}
origGoPath := os.Getenv("GOPATH")
os.Setenv("GOPATH", tmpDir)
defer os.Setenv("GOPATH", origGoPath)
}
setGoPath = os.Getenv("GOPATH")
defaultTimeout, _ = time.ParseDuration(*timeout + "ms")
defaultRetries = *retries
//Starts the shell if requested
//if the shell starts the program will never reach further than
//this if statement
if *startCli {
cmdChan := make(chan PurlCommand)
resultChan := make(chan string)
go InteractiveShell(cmdChan, resultChan, *live, *staging, *bastion, *bastionUser, *env, *region)
for cmd := range cmdChan {
out := ""
var err error
service, endpnt := SeparateService(cmd.EndpointStr)
out, err = SendJsonRequest(service, endpnt, cmd.JsonStr, cmd.Timeout, cmd.Retries)
if err != nil {
resultChan <- fmt.Sprintf("error running client: %+v \n%s\n", err, out)
continue
}
resultChan <- fmt.Sprintf("%s\n", out)
}
return
}
}
func GetJsonDefault(importStr string, protoName string) (string, error) {
pbfile := fmt.Sprintf("%s/src/%s/%s.proto", setGoPath, importStr, protoName)
file, err := os.Open(pbfile)
if err != nil {
log.Println("Could not read protos:", pbfile)
return "", err
}
pb := parseprotobuf.ParseProtobufRaw(file, protoName, true, setGoPath) //PARSE PROTOBUF
jsonStr, _ := parseprotobuf.PrintJsonExample(pb) //GET DEFAULT REQUEST
return jsonStr, nil
}
// Creates a temporary go directory in os.TempDir
func CreateTempDir(nuke bool) (string, error) {
tmpDir := os.TempDir()
if nuke {
remerr := os.RemoveAll(tmpDir + "/go")
if remerr != nil {
seelog.Warnf("unable to delete temp dir: %v, error: %v", tmpDir+"/go", remerr)
}
}
err := os.Mkdir(tmpDir+"/go", 0777)
if err != nil {
e, ok := err.(*os.PathError)
if ok {
if e.Err.Error() != "file exists" {
return "", err
}
}
}
return tmpDir + "/go", nil
}
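// SeparateService splits a dot-separated endpoint string into its service part
// (everything before the last dot) and its endpoint part (the final component).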
func SeparateService(service string) (string, string) {
endpointStrArr := strings.Split(service, ".")
endpointStr := endpointStrArr[len(endpointStrArr)-1]
service = ""
for i, val := range endpointStrArr {
if i < len(endpointStrArr)-2 {
service += val + "."
} else if i < len(endpointStrArr)-1 {
service += val
}
}
return service, endpointStr
}
// SendJsonRequest sends a single JSON request via the platform client and returns the
// pretty-printed response together with its duration and trace id.
func SendJsonRequest(service string, endpoint string, jsonString string, timeout time.Duration, retries int) (out string, err error) {
req, opts, err := CreateRequest(service, endpoint, jsonString, timeout, retries)
if err != nil {
return
}
latestTrace = req.TraceID()
rsp, dur, err := SendRequest(req, opts)
if err != nil {
out = fmt.Sprintf("Duration: %v\nTraceId: %s\n", dur.String(), latestTrace)
return
}
var buf bytes.Buffer
err = json.Indent(&buf, rsp.Body(), "", " ")
if err != nil {
out = fmt.Sprintf("Duration: %v\nTraceId: %s\n", dur.String(), latestTrace)
return
}
out = fmt.Sprintf("%s\n\nDuration: %v\nTraceId: %s\n", string(buf.Bytes()), dur.String(), latestTrace)
return
}
// SendRepeatedJsonRequest sends the same JSON request `number` times sequentially and
// reports the total duration and the number of errors.
func SendRepeatedJsonRequest(service string, endpoint string, json string, timeout time.Duration, retries int, number int) (out string, err error) {
req, opts, err := CreateRequest(service, endpoint, json, timeout, retries)
if err != nil {
return
}
var totalDuration time.Duration
errNum := 0
for i := 0; i < number; i++ {
_, dur, err := SendRequest(req, opts)
if err != nil {
errNum++
}
totalDuration = totalDuration + dur
}
out = fmt.Sprintf("\nDuration: %v\nError Num: %v\n", totalDuration.String(), errNum)
return
}
// SendRepeatedAsyncJsonRequest sends the same JSON request `number` times concurrently and
// reports the total duration, throughput and the number of errors.
func SendRepeatedAsyncJsonRequest(service string, endpoint string, json string, timeout time.Duration, retries int, number int) (out string, err error) {
errNum := 0
complete := make(chan bool)
done := make(chan bool)
go func() {
for i := 0; i < number; i++ {
if !<-done {
errNum++
}
}
complete <- true
}()
now := time.Now()
for i := 0; i < number; i++ {
go func() {
req, opts, err := CreateRequest(service, endpoint, json, timeout, retries)
if err != nil {
//return
done <- false
return
}
_, _, err = SendRequest(req, opts)
if err != nil {
done <- false
return
}
done <- true
}()
}
<-complete
totalDuration := time.Since(now)
out = fmt.Sprintf("\nDuration: %v (%vs)\nThroughput: %v/s\nError Num: %v\n", totalDuration.Nanoseconds(), float64(totalDuration.Nanoseconds())*1.0e-9, float64(number)/(float64(totalDuration.Nanoseconds())*1.0e-9), errNum)
return
}
func SendRequest(req *client.Request, opts client.Options) (*client.Response, time.Duration, error) {
now := time.Now()
rsp, err := client.CustomReq(req, opts)
dur := time.Since(now)
return rsp, dur, err
}
func CreateRequest(service string, endpoint string, json string, timeout time.Duration, retries int) (*client.Request, client.Options, error) {
var req *client.Request
req, err := client.NewJsonRequest(service, endpoint, []byte(json))
if err != nil {
return nil, nil, err
}
u4, _ := gouuid.NewV4()
traceId := u4.String()
req.SetTraceID(traceId)
req.SetTraceShouldPersist(true)
req.SetSessionID(login.Session)
req.SetFrom(login.FromService)
opts := client.Options{
"timeout": timeout,
}
if retries != 0 {
opts["retries"] = retries
}
return req, opts, nil
}
//runs go get on the import path
func GoGet(importStr string, update bool, args ...string) error {
seelog.Info("Getting: " + importStr)
var cmd *exec.Cmd
updateArgs := []string{"get", "-u"}
reuseArgs := []string{"get"}
if update {
for _, arg := range args {
updateArgs = append(updateArgs, arg)
}
updateArgs = append(updateArgs, importStr)
cmd = exec.Command("go", updateArgs...)
} else {
for _, arg := range args {
reuseArgs = append(reuseArgs, arg)
}
reuseArgs = append(reuseArgs, importStr)
cmd = exec.Command("go", reuseArgs...)
}
var outGet bytes.Buffer
cmd.Stdout = &outGet
var outErr bytes.Buffer
cmd.Stderr = &outErr
err := cmd.Start()
if err != nil {
seelog.Info(err)
}
err = cmd.Wait()
if outGet.String() != "" {
seelog.Info(outGet.String())
}
if outErr.String() != "" {
seelog.Info(outErr.String())
}
if err != nil {
seelog.Info("error: %v", err)
return err
}
return nil
}
func GetDependancies(update bool) {
//Get dependancies for client
err := GoGet("github.com/hailo-platform/H2O/protobuf/proto", update)
if err != nil {
log.Println("problem getting code")
return
}
err = GoGet("github.com/hailo-platform/H2O/platform/client", update)
if err != nil {
log.Println("problem getting code")
return
}
err = GoGet("github.com/cihub/seelog", update)
if err != nil {
log.Println("problem getting code")
return
}
}
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
mlrun/runtimes/base.py
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import os
import shlex
import traceback
import typing
import uuid
from abc import ABC, abstractmethod
from ast import literal_eval
from base64 import b64encode
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from os import environ
from typing import Dict, List, Optional, Tuple, Union
import IPython
from kubernetes.client.rest import ApiException
from nuclio.build import mlrun_footer
from sqlalchemy.orm import Session
import mlrun.errors
import mlrun.utils.helpers
import mlrun.utils.regex
from mlrun.api import schemas
from mlrun.api.constants import LogSources
from mlrun.api.db.base import DBInterface
from mlrun.utils.helpers import generate_object_uri, verify_field_regex
from ..config import config
from ..datastore import store_manager
from ..db import RunDBError, get_or_set_dburl, get_run_db
from ..execution import MLClientCtx
from ..k8s_utils import get_k8s_helper
from ..kfpops import mlrun_op, write_kfpmeta
from ..lists import RunList
from ..model import (
BaseMetadata,
HyperParamOptions,
ImageBuilder,
ModelObj,
RunObject,
RunTemplate,
)
from ..secrets import SecretsStore
from ..utils import (
dict_to_json,
dict_to_yaml,
enrich_image_url,
get_in,
get_parsed_docker_registry,
get_ui_url,
is_ipython,
logger,
now_date,
update_in,
)
from .constants import PodPhases, RunStates
from .funcdoc import update_function_entry_points
from .generators import get_generator
from .utils import RunError, calc_hash, results_to_iter
run_modes = ["pass"]
class FunctionStatus(ModelObj):
def __init__(self, state=None, build_pod=None):
self.state = state
self.build_pod = build_pod
class FunctionSpec(ModelObj):
def __init__(
self,
command=None,
args=None,
image=None,
mode=None,
build=None,
entry_points=None,
description=None,
workdir=None,
default_handler=None,
pythonpath=None,
disable_auto_mount=False,
):
self.command = command or ""
self.image = image or ""
self.mode = mode
self.args = args or []
self.rundb = None
self.description = description or ""
self.workdir = workdir
self.pythonpath = pythonpath
self._build = None
self.build = build
self.default_handler = default_handler
# TODO: type verification (FunctionEntrypoint dict)
self.entry_points = entry_points or {}
self.disable_auto_mount = disable_auto_mount
@property
def build(self) -> ImageBuilder:
return self._build
@build.setter
def build(self, build):
self._build = self._verify_dict(build, "build", ImageBuilder)
class BaseRuntime(ModelObj):
kind = "base"
_is_nested = False
_is_remote = False
_dict_fields = ["kind", "metadata", "spec", "status", "verbose"]
def __init__(self, metadata=None, spec=None):
self._metadata = None
self.metadata = metadata
self.kfp = None
self._spec = None
self.spec = spec
self._db_conn = None
self._secrets = None
self._k8s = None
self._is_built = False
self.is_child = False
self._status = None
self.status = None
self._is_api_server = False
self.verbose = False
def set_db_connection(self, conn, is_api=False):
if not self._db_conn:
self._db_conn = conn
self._is_api_server = is_api
@property
def metadata(self) -> BaseMetadata:
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = self._verify_dict(metadata, "metadata", BaseMetadata)
@property
def spec(self) -> FunctionSpec:
return self._spec
@spec.setter
def spec(self, spec):
self._spec = self._verify_dict(spec, "spec", FunctionSpec)
@property
def status(self) -> FunctionStatus:
return self._status
@status.setter
def status(self, status):
self._status = self._verify_dict(status, "status", FunctionStatus)
def _get_k8s(self):
return get_k8s_helper()
def set_label(self, key, value):
self.metadata.labels[key] = str(value)
return self
@property
def uri(self):
return self._function_uri()
@property
def is_deployed(self):
return True
def _is_remote_api(self):
db = self._get_db()
if db and db.kind == "http":
return True
return False
def _use_remote_api(self):
if (
self._is_remote
and not self._is_api_server
and self._get_db()
and self._get_db().kind == "http"
):
return True
return False
def _function_uri(self, tag=None, hash_key=None):
return generate_object_uri(
self.metadata.project,
self.metadata.name,
tag=tag or self.metadata.tag,
hash_key=hash_key,
)
def _ensure_run_db(self):
self.spec.rundb = self.spec.rundb or get_or_set_dburl()
def _get_db(self):
self._ensure_run_db()
if not self._db_conn:
if self.spec.rundb:
self._db_conn = get_run_db(self.spec.rundb, secrets=self._secrets)
return self._db_conn
# This function is different than the auto_mount function, as it mounts to runtimes based on the configuration.
# That's why it's named differently.
def try_auto_mount_based_on_config(self):
pass
def fill_credentials(self):
if "MLRUN_AUTH_SESSION" in os.environ or "V3IO_ACCESS_KEY" in os.environ:
self.metadata.credentials.access_key = os.environ.get(
"MLRUN_AUTH_SESSION"
) or os.environ.get("V3IO_ACCESS_KEY")
def run(
self,
runspec: RunObject = None,
handler=None,
name: str = "",
project: str = "",
params: dict = None,
inputs: dict = None,
out_path: str = "",
workdir: str = "",
artifact_path: str = "",
watch: bool = True,
schedule: Union[str, schemas.ScheduleCronTrigger] = None,
hyperparams: Dict[str, list] = None,
hyper_param_options: HyperParamOptions = None,
verbose=None,
scrape_metrics: bool = None,
local=False,
local_code_path=None,
) -> RunObject:
"""Run a local or remote task.
:param runspec: run template object or dict (see RunTemplate)
:param handler: pointer or name of a function handler
:param name: execution name
:param project: project name
:param params: input parameters (dict)
:param inputs: input objects (dict of key: path)
:param out_path: default artifact output path
:param artifact_path: default artifact output path (will replace out_path)
:param workdir: default input artifacts path
:param watch: watch/follow run log
:param schedule: ScheduleCronTrigger class instance or a standard crontab expression string
(which will be converted to the class using its `from_crontab` constructor).
see this link for help:
https://apscheduler.readthedocs.io/en/v3.6.3/modules/triggers/cron.html#module-apscheduler.triggers.cron
:param hyperparams: dict of param name and list of values to be enumerated e.g. {"p1": [1,2,3]}
the default strategy is grid search, can specify strategy (grid, list, random)
and other options in the hyper_param_options parameter
:param hyper_param_options: dict or :py:class:`~mlrun.model.HyperParamOptions` struct of
hyper parameter options
:param verbose: add verbose prints/logs
:param scrape_metrics: whether to add the `mlrun/scrape-metrics` label to this run's resources
:param local: run the function locally vs on the runtime/cluster
:param local_code_path: path of the code for local runs & debug
:return: run context object (RunObject) with run metadata, results and status
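example (illustrative; `fn`, the handler name and the parameters are placeholders)::
run = fn.run(handler="train", params={"p1": 5}, watch=True)
print(run.status.state)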
"""
if self.spec.mode and self.spec.mode not in run_modes:
raise ValueError(f'run mode can only be {",".join(run_modes)}')
# Perform auto-mount if necessary - make sure it only runs on client side (when using remote API)
if self._use_remote_api():
self.try_auto_mount_based_on_config()
self.fill_credentials()
if local:
if schedule is not None:
raise mlrun.errors.MLRunInvalidArgumentError(
"local and schedule cannot be used together"
)
# allow local run simulation with a flip of a flag
command = self
if local_code_path:
project = project or self.metadata.project
name = name or self.metadata.name
command = local_code_path
return mlrun.run_local(
runspec,
command,
name,
self.spec.args,
workdir=workdir,
project=project,
handler=handler,
params=params,
inputs=inputs,
artifact_path=artifact_path,
mode=self.spec.mode,
)
if runspec:
runspec = deepcopy(runspec)
if isinstance(runspec, str):
runspec = literal_eval(runspec)
if not isinstance(runspec, (dict, RunTemplate, RunObject)):
raise ValueError(
"task/runspec is not a valid task object," f" type={type(runspec)}"
)
if isinstance(runspec, RunTemplate):
runspec = RunObject.from_template(runspec)
if isinstance(runspec, dict) or runspec is None:
runspec = RunObject.from_dict(runspec)
runspec.spec.handler = (
handler or runspec.spec.handler or self.spec.default_handler or ""
)
if runspec.spec.handler and self.kind not in ["handler", "dask"]:
runspec.spec.handler = runspec.spec.handler_name
def_name = self.metadata.name
if runspec.spec.handler_name:
def_name += "-" + runspec.spec.handler_name
runspec.metadata.name = name or runspec.metadata.name or def_name
verify_field_regex(
"run.metadata.name", runspec.metadata.name, mlrun.utils.regex.run_name
)
runspec.metadata.project = (
project
or runspec.metadata.project
or self.metadata.project
or config.default_project
)
runspec.spec.parameters = params or runspec.spec.parameters
runspec.spec.inputs = inputs or runspec.spec.inputs
runspec.spec.hyperparams = hyperparams or runspec.spec.hyperparams
runspec.spec.hyper_param_options = (
hyper_param_options or runspec.spec.hyper_param_options
)
runspec.spec.verbose = verbose or runspec.spec.verbose
if scrape_metrics is None:
if runspec.spec.scrape_metrics is None:
scrape_metrics = config.scrape_metrics
else:
scrape_metrics = runspec.spec.scrape_metrics
runspec.spec.scrape_metrics = scrape_metrics
runspec.spec.output_path = out_path or artifact_path or runspec.spec.output_path
runspec.spec.input_path = (
workdir or runspec.spec.input_path or self.spec.workdir
)
spec = runspec.spec
if spec.secret_sources:
self._secrets = SecretsStore.from_list(spec.secret_sources)
# update run metadata (uid, labels) and store in DB
meta = runspec.metadata
meta.uid = meta.uid or uuid.uuid4().hex
runspec.spec.output_path = runspec.spec.output_path or config.artifact_path
if runspec.spec.output_path:
runspec.spec.output_path = runspec.spec.output_path.replace(
"{{run.uid}}", meta.uid
)
runspec.spec.output_path = mlrun.utils.helpers.fill_artifact_path_template(
runspec.spec.output_path, runspec.metadata.project
)
if is_local(runspec.spec.output_path):
logger.warning(
"artifact path is not defined or is local,"
" artifacts will not be visible in the UI"
)
if self.kind not in ["", "local", "handler", "dask"]:
raise ValueError(
"absolute artifact_path must be specified"
" when running remote tasks"
)
db = self._get_db()
if not self.is_deployed:
raise RunError(
"function image is not built/ready, use .deploy() method first"
)
if self.verbose:
logger.info(f"runspec:\n{runspec.to_yaml()}")
if "V3IO_USERNAME" in environ and "v3io_user" not in meta.labels:
meta.labels["v3io_user"] = environ.get("V3IO_USERNAME")
if not self.is_child:
db_str = "self" if self._is_api_server else self.spec.rundb
logger.info(f"starting run {meta.name} uid={meta.uid} DB={db_str}")
meta.labels["kind"] = self.kind
if "owner" not in meta.labels:
meta.labels["owner"] = environ.get("V3IO_USERNAME") or getpass.getuser()
if runspec.spec.output_path:
runspec.spec.output_path = runspec.spec.output_path.replace(
"{{run.user}}", meta.labels["owner"]
)
if db and self.kind != "handler":
struct = self.to_dict()
hash_key = db.store_function(
struct, self.metadata.name, self.metadata.project, versioned=True
)
runspec.spec.function = self._function_uri(hash_key=hash_key)
# execute the job remotely (to a k8s cluster via the API service)
if self._use_remote_api():
if self._secrets:
runspec.spec.secret_sources = self._secrets.to_serial()
try:
resp = db.submit_job(runspec, schedule=schedule)
if schedule:
logger.info(f"task scheduled, {resp}")
return
if resp:
txt = get_in(resp, "status.status_text")
if txt:
logger.info(txt)
if watch or self.kfp:
runspec.logs(True, self._get_db())
resp = self._get_db_run(runspec)
except Exception as err:
logger.error(f"got remote run err, {err}")
result = None
# if we got a schedule there is no reason to do post_run stuff (its purpose is to update the run
# status with the error, but there is no run in the case of a schedule)
if not schedule:
result = self._update_run_state(task=runspec, err=err)
return self._wrap_run_result(
result, runspec, schedule=schedule, err=err
)
return self._wrap_run_result(resp, runspec, schedule=schedule)
elif self._is_remote and not self._is_api_server and not self.kfp:
logger.warning(
"warning!, Api url not set, " "trying to exec remote runtime locally"
)
execution = MLClientCtx.from_dict(
runspec.to_dict(), db, autocommit=False, is_api=self._is_api_server
)
self._pre_run(runspec, execution) # hook for runtime specific prep
# create task generator (for child runs) from spec
task_generator = None
if not self._is_nested:
task_generator = get_generator(spec, execution)
last_err = None
if task_generator:
# multiple runs (based on hyper params or params file)
runner = self._run_many
if hasattr(self, "_parallel_run_many") and task_generator.use_parallel():
runner = self._parallel_run_many
results = runner(task_generator, execution, runspec)
results_to_iter(results, runspec, execution)
result = execution.to_dict()
else:
# single run
try:
resp = self._run(runspec, execution)
if watch and self.kind not in ["", "handler", "local"]:
state = runspec.logs(True, self._get_db())
if state != "succeeded":
logger.warning(f"run ended with state {state}")
result = self._update_run_state(resp, task=runspec)
except RunError as err:
last_err = err
result = self._update_run_state(task=runspec, err=err)
self._post_run(result, execution) # hook for runtime specific cleanup
return self._wrap_run_result(result, runspec, schedule=schedule, err=last_err)
def _wrap_run_result(
self, result: dict, runspec: RunObject, schedule=None, err=None
):
# if the purpose was to schedule (and not to run) nothing to wrap
if schedule:
return
if result and self.kfp and err is None:
write_kfpmeta(result)
# show ipython/jupyter result table widget
results_tbl = RunList()
if result:
results_tbl.append(result)
else:
logger.info("no returned result (job may still be in progress)")
results_tbl.append(runspec.to_dict())
uid = runspec.metadata.uid
project = runspec.metadata.project
if is_ipython and config.ipython_widget:
results_tbl.show()
print()
ui_url = get_ui_url(project, uid)
if ui_url:
ui_url = f' or <a href="{ui_url}" target="_blank">click here</a> to open in UI'
IPython.display.display(
IPython.display.HTML(
f"<b> > to track results use the .show() or .logs() methods {ui_url}</b>"
)
)
elif not self.is_child:
ui_url = get_ui_url(project, uid)
ui_url = f"\nor click {ui_url} for UI" if ui_url else ""
project_flag = f"-p {project}" if project else ""
print(
f"to track results use the CLI:\n"
f"info: mlrun get run {uid} {project_flag}\nlogs: mlrun logs {uid} {project_flag}{ui_url}"
)
if result:
run = RunObject.from_dict(result)
logger.info(f"run executed, status={run.status.state}")
if run.status.state == "error":
if self._is_remote and not self.is_child:
print(f"runtime error: {run.status.error}")
raise RunError(run.status.error)
return run
return None
def _get_db_run(self, task: RunObject = None):
if self._get_db() and task:
project = task.metadata.project
uid = task.metadata.uid
iter = task.metadata.iteration
try:
return self._get_db().read_run(uid, project, iter=iter)
except RunDBError:
return None
if task:
return task.to_dict()
def _generate_runtime_env(self, runobj: RunObject):
runtime_env = {
"MLRUN_EXEC_CONFIG": runobj.to_json(),
"MLRUN_DEFAULT_PROJECT": runobj.metadata.project
or self.metadata.project
or config.default_project,
}
if runobj.spec.verbose:
runtime_env["MLRUN_LOG_LEVEL"] = "DEBUG"
if config.httpdb.api_url:
runtime_env["MLRUN_DBPATH"] = config.httpdb.api_url
if self.metadata.namespace or config.namespace:
runtime_env["MLRUN_NAMESPACE"] = self.metadata.namespace or config.namespace
return runtime_env
def _get_cmd_args(self, runobj: RunObject):
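"""Build the command, args and extra env vars used to execute a run: when inline code,
a source archive or a non-"pass" mode is involved, the command is wrapped with
``mlrun run ... --from-env``; otherwise the user command and args are used directly,
with ``{param}`` templating applied from the run parameters."""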
extra_env = self._generate_runtime_env(runobj)
if self.spec.pythonpath:
extra_env["PYTHONPATH"] = self.spec.pythonpath
args = []
command = self.spec.command
code = (
self.spec.build.functionSourceCode if hasattr(self.spec, "build") else None
)
if runobj.spec.handler and self.spec.mode == "pass":
raise ValueError('cannot use "pass" mode with handler')
if code:
extra_env["MLRUN_EXEC_CODE"] = code
load_archive = self.spec.build.load_source_on_run and self.spec.build.source
need_mlrun = code or load_archive or self.spec.mode != "pass"
if need_mlrun:
args = ["run", "--name", runobj.metadata.name, "--from-env"]
if runobj.spec.handler:
args += ["--handler", runobj.spec.handler]
if self.spec.mode:
args += ["--mode", self.spec.mode]
if self.spec.build.origin_filename:
args += ["--origin-file", self.spec.build.origin_filename]
if load_archive:
if code:
raise ValueError("cannot specify both code and source archive")
args += ["--source", self.spec.build.source]
if command:
args += [shlex.quote(command)]
command = "mlrun"
if self.spec.args:
args = args + self.spec.args
else:
command = command.format(**runobj.spec.parameters)
if self.spec.args:
args = [
shlex.quote(arg.format(**runobj.spec.parameters))
for arg in self.spec.args
]
extra_env = [{"name": k, "value": v} for k, v in extra_env.items()]
return command, args, extra_env
def _pre_run(self, runspec: RunObject, execution):
pass
def _post_run(self, results, execution):
pass
def _run(self, runobj: RunObject, execution) -> dict:
pass
def _run_many(self, generator, execution, runobj: RunObject) -> RunList:
results = RunList()
num_errors = 0
tasks = generator.generate(runobj)
for task in tasks:
try:
self.store_run(task)
resp = self._run(task, execution)
resp = self._update_run_state(resp, task=task)
run_results = resp["status"].get("results", {})
if generator.eval_stop_condition(run_results):
logger.info(
f"reached early stop condition ({generator.options.stop_condition}), stopping iterations!"
)
results.append(resp)
break
except RunError as err:
task.status.state = "error"
task.status.error = str(err)
resp = self._update_run_state(task=task, err=err)
num_errors += 1
if num_errors > generator.max_errors:
logger.error("too many errors, stopping iterations!")
results.append(resp)
break
results.append(resp)
return results
def store_run(self, runobj: RunObject):
if self._get_db() and runobj:
project = runobj.metadata.project
uid = runobj.metadata.uid
iter = runobj.metadata.iteration
self._get_db().store_run(runobj.to_dict(), uid, project, iter=iter)
def _store_run_dict(self, rundict: dict):
if self._get_db() and rundict:
project = get_in(rundict, "metadata.project", "")
uid = get_in(rundict, "metadata.uid")
iter = get_in(rundict, "metadata.iteration", 0)
self._get_db().store_run(rundict, uid, project, iter=iter)
def _update_run_state(
self, resp: dict = None, task: RunObject = None, err=None
) -> dict:
"""update the task state in the DB"""
was_none = False
if resp is None and task:
was_none = True
resp = self._get_db_run(task)
if not resp:
self.store_run(task)
return task.to_dict()
if task.status.status_text:
update_in(resp, "status.status_text", task.status.status_text)
if resp is None:
return None
if not isinstance(resp, dict):
raise ValueError(f"post_run called with type {type(resp)}")
updates = None
last_state = get_in(resp, "status.state", "")
if last_state == "error" or err:
updates = {"status.last_update": now_date().isoformat()}
updates["status.state"] = "error"
update_in(resp, "status.state", "error")
if err:
update_in(resp, "status.error", str(err))
err = get_in(resp, "status.error")
if err:
updates["status.error"] = str(err)
elif not was_none and last_state != "completed":
updates = {"status.last_update": now_date().isoformat()}
updates["status.state"] = "completed"
update_in(resp, "status.state", "completed")
if self._get_db() and updates:
project = get_in(resp, "metadata.project")
uid = get_in(resp, "metadata.uid")
iter = get_in(resp, "metadata.iteration", 0)
self._get_db().update_run(updates, uid, project, iter=iter)
return resp
def _force_handler(self, handler):
if not handler:
raise RunError(f"handler must be provided for {self.kind} runtime")
def full_image_path(self, image=None):
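"""Return the fully qualified image path; a leading "." is resolved against the parsed
docker registry or, if unavailable, the IGZ namespace docker registry."""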
image = image or self.spec.image or ""
image = enrich_image_url(image)
if not image.startswith("."):
return image
registry, _ = get_parsed_docker_registry()
if registry:
return f"{registry}/{image[1:]}"
namespace_domain = environ.get("IGZ_NAMESPACE_DOMAIN", None)
if namespace_domain is not None:
return f"docker-registry.{namespace_domain}:80/{image[1:]}"
raise RunError("local container registry is not defined")
def as_step(
self,
runspec: RunObject = None,
handler=None,
name: str = "",
project: str = "",
params: dict = None,
hyperparams=None,
selector="",
hyper_param_options: HyperParamOptions = None,
inputs: dict = None,
outputs: dict = None,
workdir: str = "",
artifact_path: str = "",
image: str = "",
labels: dict = None,
use_db=True,
verbose=None,
scrape_metrics=False,
):
"""Run a local or remote task.
:param runspec: run template object or dict (see RunTemplate)
:param handler: name of the function handler
:param name: execution name
:param project: project name
:param params: input parameters (dict)
:param hyperparams: hyper parameters
:param selector: selection criteria for hyper params
:param inputs: input objects (dict of key: path)
:param outputs: list of outputs which can pass in the workflow
:param artifact_path: default artifact output path (replace out_path)
:param workdir: default input artifacts path
:param image: container image to use
:param labels: labels to tag the job/run with ({key:val, ..})
:param use_db: save function spec in the db (vs the workflow file)
:param verbose: add verbose prints/logs
:param scrape_metrics: whether to add the `mlrun/scrape-metrics` label to this run's resources
:return: KubeFlow containerOp
"""
# if self.spec.image and not image:
# image = self.full_image_path()
if use_db:
# if the same function is built as part of the pipeline we do not use the versioned function,
# but rather the latest function with the same tag, so we can pick up the updated image/status
versioned = False if hasattr(self, "_build_in_pipeline") else True
url = self.save(versioned=versioned, refresh=True)
else:
url = None
if runspec is not None:
verify_field_regex(
"run.metadata.name", runspec.metadata.name, mlrun.utils.regex.run_name
)
return mlrun_op(
name,
project,
function=self,
func_url=url,
runobj=runspec,
handler=handler,
params=params,
hyperparams=hyperparams,
selector=selector,
hyper_param_options=hyper_param_options,
inputs=inputs,
outputs=outputs,
job_image=image,
labels=labels,
out_path=artifact_path,
in_path=workdir,
verbose=verbose,
scrape_metrics=scrape_metrics,
)
def with_code(self, from_file="", body=None, with_doc=True):
"""Update the function code
This function eliminates the need to build container images every time we edit the code
:param from_file: blank for current notebook, or path to .py/.ipynb file
:param body: will use the body as the function code
:param with_doc: update the document of the function parameters
:return: function object
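example (illustrative; the path is a placeholder)::
fn.with_code(from_file="./src/handler.py")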
"""
if body and from_file:
raise mlrun.errors.MLRunInvalidArgumentError(
"must provide either body or from_file argument. not both"
)
if (not body and not from_file) or (from_file and from_file.endswith(".ipynb")):
from nuclio import build_file
_, _, body = build_file(from_file, name=self.metadata.name)
else:
if from_file:
with open(from_file) as fp:
body = fp.read()
if self.kind == mlrun.runtimes.RuntimeKinds.serving:
body = body + mlrun_footer.format(
mlrun.runtimes.serving.serving_subkind
)
self.spec.build.functionSourceCode = b64encode(body.encode("utf-8")).decode(
"utf-8"
)
if with_doc:
update_function_entry_points(self, body)
return self
def with_requirements(self, requirements: Union[str, List[str]]):
"""add package requirements from file or list to build spec.
:param requirements: python requirements file path or list of packages
:return: function object
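example (illustrative; package names are placeholders)::
fn.with_requirements(["pandas", "scikit-learn"])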
"""
if isinstance(requirements, str):
with open(requirements, "r") as fp:
requirements = fp.read().splitlines()
commands = self.spec.build.commands or []
commands.append("python -m pip install " + " ".join(requirements))
self.spec.build.commands = commands
return self
def export(self, target="", format=".yaml", secrets=None, strip=True):
"""save function spec to a local/remote path (default to./function.yaml)
:param target: target path/url
:param format: `.yaml` (default) or `.json`
:param secrets: optional secrets dict/object for target path (e.g. s3)
:param strip: strip status data
:returns: self
"""
if self.kind == "handler":
raise ValueError(
"cannot export local handler function, use "
+ "code_to_function() to serialize your function"
)
calc_hash(self)
struct = self.to_dict(strip=strip)
if format == ".yaml":
data = dict_to_yaml(struct)
else:
data = dict_to_json(struct)
stores = store_manager.set(secrets)
target = target or "function.yaml"
datastore, subpath = stores.get_or_create_store(target)
datastore.put(subpath, data)
logger.info(f"function spec saved to path: {target}")
return self
def save(self, tag="", versioned=False, refresh=False) -> str:
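"""Store the function object in the configured mlrun DB and return a "db://" URI
referencing it (optionally refreshing status/image from the DB first when refresh=True)."""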
db = self._get_db()
if not db:
logger.error("database connection is not configured")
return ""
if refresh and self._is_remote_api():
try:
meta = self.metadata
db_func = db.get_function(meta.name, meta.project, meta.tag)
if db_func and "status" in db_func:
self.status = db_func["status"]
if (
self.status.state
and self.status.state == "ready"
and "nuclio_name" not in self.status
):
self.spec.image = get_in(db_func, "spec.image", self.spec.image)
except Exception:
pass
tag = tag or self.metadata.tag
obj = self.to_dict()
logger.debug(f"saving function: {self.metadata.name}, tag: {tag}")
hash_key = db.store_function(
obj, self.metadata.name, self.metadata.project, tag, versioned
)
hash_key = hash_key if versioned else None
return "db://" + self._function_uri(hash_key=hash_key, tag=tag)
def to_dict(self, fields=None, exclude=None, strip=False):
struct = super().to_dict(fields, exclude=exclude)
if strip:
if "status" in struct:
del struct["status"]
return struct
def doc(self):
print("function:", self.metadata.name)
print(self.spec.description)
if self.spec.default_handler:
print("default handler:", self.spec.default_handler)
if self.spec.entry_points:
print("entry points:")
for name, entry in self.spec.entry_points.items():
print(f" {name}: {entry.get('doc', '')}")
params = entry.get("parameters")
if params:
for p in params:
line = p["name"]
if "type" in p:
line += f"({p['type']})"
line += " - " + p.get("doc", "")
if "default" in p:
line += f", default={p['default']}"
print(" " + line)
def is_local(url):
if not url:
return True
return "://" not in url and not url.startswith("/")
class BaseRuntimeHandler(ABC):
# setting here to allow tests to override
wait_for_deletion_interval = 10
@staticmethod
@abstractmethod
def _get_object_label_selector(object_id: str) -> str:
"""
Should return the label selector that should be used to get only resources of a specific object (with id object_id)
"""
pass
@staticmethod
@abstractmethod
def _get_possible_mlrun_class_label_values() -> List[str]:
"""
Should return the possible values of the mlrun/class label for runtime resources that are of this runtime
handler kind
"""
pass
def list_resources(
self,
project: str,
object_id: typing.Optional[str] = None,
label_selector: str = None,
group_by: Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
) -> Union[
mlrun.api.schemas.RuntimeResources,
mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
]:
# We currently don't support listing runtime resources in a non-k8s env
if not mlrun.k8s_utils.get_k8s_helper(
silent=True
).is_running_inside_kubernetes_cluster():
return {}
k8s_helper = get_k8s_helper()
namespace = k8s_helper.resolve_namespace()
label_selector = self._resolve_label_selector(
project, object_id, label_selector
)
pods = self._list_pods(namespace, label_selector)
pod_resources = self._build_pod_resources(pods)
crd_objects = self._list_crd_objects(namespace, label_selector)
crd_resources = self._build_crd_resources(crd_objects)
response = self._build_list_resources_response(
pod_resources, crd_resources, group_by
)
response = self._enrich_list_resources_response(
response, namespace, label_selector, group_by
)
return response
def build_output_from_runtime_resources(
self,
runtime_resources_list: List[mlrun.api.schemas.RuntimeResources],
group_by: Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
):
pod_resources = []
crd_resources = []
for runtime_resources in runtime_resources_list:
pod_resources += runtime_resources.pod_resources
crd_resources += runtime_resources.crd_resources
response = self._build_list_resources_response(
pod_resources, crd_resources, group_by
)
response = self._build_output_from_runtime_resources(
response, runtime_resources_list, group_by
)
return response
def delete_resources(
self,
db: DBInterface,
db_session: Session,
label_selector: str = None,
force: bool = False,
grace_period: int = None,
):
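"""Delete this runtime's k8s resources (pods or CRD objects) that match the label
selector, honoring the deletion grace period."""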
if grace_period is None:
grace_period = config.runtime_resources_deletion_grace_period
# We currently don't support removing runtime resources in non k8s env
if not mlrun.k8s_utils.get_k8s_helper(
silent=True
).is_running_inside_kubernetes_cluster():
return
k8s_helper = get_k8s_helper()
namespace = k8s_helper.resolve_namespace()
label_selector = self._resolve_label_selector(
"*", label_selector=label_selector
)
crd_group, crd_version, crd_plural = self._get_crd_info()
if crd_group and crd_version and crd_plural:
deleted_resources = self._delete_crd_resources(
db, db_session, namespace, label_selector, force, grace_period,
)
else:
deleted_resources = self._delete_pod_resources(
db, db_session, namespace, label_selector, force, grace_period,
)
self._delete_resources(
db,
db_session,
namespace,
deleted_resources,
label_selector,
force,
grace_period,
)
def delete_runtime_object_resources(
self,
db: DBInterface,
db_session: Session,
object_id: str,
label_selector: str = None,
force: bool = False,
grace_period: int = None,
):
if grace_period is None:
grace_period = config.runtime_resources_deletion_grace_period
label_selector = self._add_object_label_selector_if_needed(
object_id, label_selector
)
self.delete_resources(db, db_session, label_selector, force, grace_period)
def monitor_runs(self, db: DBInterface, db_session: Session):
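"""List this runtime's k8s resources (pods or CRD objects) and reconcile the state of
their corresponding runs in the DB."""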
k8s_helper = get_k8s_helper()
namespace = k8s_helper.resolve_namespace()
label_selector = self._get_default_label_selector()
crd_group, crd_version, crd_plural = self._get_crd_info()
runtime_resource_is_crd = False
if crd_group and crd_version and crd_plural:
runtime_resource_is_crd = True
runtime_resources = self._list_crd_objects(namespace, label_selector)
else:
runtime_resources = self._list_pods(namespace, label_selector)
project_run_uid_map = self._list_runs_for_monitoring(db, db_session)
for runtime_resource in runtime_resources:
try:
self._monitor_runtime_resource(
db,
db_session,
project_run_uid_map,
runtime_resource,
runtime_resource_is_crd,
namespace,
)
except Exception as exc:
logger.warning(
"Failed monitoring runtime resource. Continuing",
runtime_resource_name=runtime_resource["metadata"]["name"],
namespace=namespace,
exc=str(exc),
traceback=traceback.format_exc(),
)
def _add_object_label_selector_if_needed(
self,
object_id: typing.Optional[str] = None,
label_selector: typing.Optional[str] = None,
):
if object_id:
object_label_selector = self._get_object_label_selector(object_id)
if label_selector:
label_selector = ",".join([object_label_selector, label_selector])
else:
label_selector = object_label_selector
return label_selector
def _enrich_list_resources_response(
self,
response: Union[
mlrun.api.schemas.RuntimeResources,
mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
],
namespace: str,
label_selector: str = None,
group_by: Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
) -> Union[
mlrun.api.schemas.RuntimeResources,
mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
]:
"""
Override this to list resources other than pods or CRDs (which are handled by the base class)
"""
return response
def _build_output_from_runtime_resources(
self,
response: Union[
mlrun.api.schemas.RuntimeResources,
mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
],
runtime_resources_list: List[mlrun.api.schemas.RuntimeResources],
group_by: Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
):
"""
Override this to add runtime resources other than pods or CRDs (which are handled by the base class) to the
output
"""
return response
def _delete_resources(
self,
db: DBInterface,
db_session: Session,
namespace: str,
deleted_resources: List[Dict],
label_selector: str = None,
force: bool = False,
grace_period: int = None,
):
"""
Override this to handle deletion of resources other than pods or CRDs (which are handled by the base class)
Note that this is happening before the deletion of the CRDs or the pods
Note to add this at the beginning:
if grace_period is None:
grace_period = config.runtime_resources_deletion_grace_period
"""
pass
def _resolve_crd_object_status_info(
self, db: DBInterface, db_session: Session, crd_object
) -> Tuple[bool, Optional[datetime], Optional[str]]:
"""
Override this if the runtime has CRD resources.
:return: Tuple with:
1. bool determining whether the crd object is in terminal state
2. datetime of when the crd object got into terminal state (only when the crd object in terminal state)
3. the desired run state matching the crd object state
"""
return False, None, None
def _update_ui_url(
self,
db: DBInterface,
db_session: Session,
project: str,
uid: str,
crd_object,
run: Dict = None,
):
"""
Update the UI URL for relevant jobs.
"""
pass
def _resolve_pod_status_info(
self, db: DBInterface, db_session: Session, pod: Dict
) -> Tuple[bool, Optional[datetime], Optional[str]]:
"""
:return: Tuple with:
1. bool determining whether the pod is in terminal state
2. datetime of when the pod got into terminal state (only when the pod in terminal state)
3. the run state matching the pod state
"""
in_terminal_state = pod["status"]["phase"] in PodPhases.terminal_phases()
run_state = PodPhases.pod_phase_to_run_state(pod["status"]["phase"])
last_container_completion_time = None
if in_terminal_state:
for container_status in pod["status"].get("container_statuses", []):
if container_status.get("state", {}).get("terminated"):
container_completion_time = container_status["state"][
"terminated"
].get("finished_at")
# take latest completion time
if (
not last_container_completion_time
or last_container_completion_time < container_completion_time
):
last_container_completion_time = container_completion_time
return in_terminal_state, last_container_completion_time, run_state
def _get_default_label_selector(self) -> str:
"""
Override this to add a default label selector
"""
class_values = self._get_possible_mlrun_class_label_values()
if not class_values:
return ""
if len(class_values) == 1:
return f"mlrun/class={class_values[0]}"
return f"mlrun/class in ({', '.join(class_values)})"
@staticmethod
def _get_crd_info() -> Tuple[str, str, str]:
"""
Override this if the runtime has CRD resources. This should return the CRD info:
crd group, crd version, crd plural
"""
return "", "", ""
@staticmethod
def _are_resources_coupled_to_run_object() -> bool:
"""
Some resources are tightly coupled to the mlrun Run object. For example, each Run of a Function of the job
kind generates a kubernetes job, whereas a Function of the daskjob kind generates a dask cluster and every Run
is executed using that cluster, i.e. no resources are created per Run.
This function should return true for runtimes in which Runs are coupled to the underlying resources, and
therefore aspects of the Run (like its state) should be taken into consideration on resources deletion
"""
return False
@staticmethod
def _expect_pods_without_uid() -> bool:
return False
def _list_pods(self, namespace: str, label_selector: str = None) -> List:
k8s_helper = get_k8s_helper()
pods = k8s_helper.list_pods(namespace, selector=label_selector)
# when we work with custom objects (list_namespaced_custom_object) it's always a dict, to be able to generalize
# code working on runtime resource (either a custom object or a pod) we're transforming to dicts
pods = [pod.to_dict() for pod in pods]
return pods
def _list_crd_objects(self, namespace: str, label_selector: str = None) -> List:
k8s_helper = get_k8s_helper()
crd_group, crd_version, crd_plural = self._get_crd_info()
crd_objects = []
if crd_group and crd_version and crd_plural:
try:
crd_objects = k8s_helper.crdapi.list_namespaced_custom_object(
crd_group,
crd_version,
namespace,
crd_plural,
label_selector=label_selector,
)
except ApiException as exc:
# ignore error if crd is not defined
if exc.status != 404:
raise
else:
crd_objects = crd_objects["items"]
return crd_objects
def _resolve_label_selector(
self,
project: str,
object_id: typing.Optional[str] = None,
label_selector: typing.Optional[str] = None,
) -> str:
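# Builds the final selector from: the default class selector, an optional user-provided selector,
# a project filter and (optionally) an object selector. Illustrative result for project "demo" and
# a job runtime: "mlrun/class=job,mlrun/project=demo".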
default_label_selector = self._get_default_label_selector()
if label_selector:
label_selector = ",".join([default_label_selector, label_selector])
else:
label_selector = default_label_selector
if project and project != "*":
label_selector = ",".join([label_selector, f"mlrun/project={project}"])
label_selector = self._add_object_label_selector_if_needed(
object_id, label_selector
)
return label_selector
def _wait_for_pods_deletion(
self, namespace: str, deleted_pods: List[Dict], label_selector: str = None,
):
k8s_helper = get_k8s_helper()
deleted_pod_names = [pod_dict["metadata"]["name"] for pod_dict in deleted_pods]
def _verify_pods_removed():
pods = k8s_helper.v1api.list_namespaced_pod(
namespace, label_selector=label_selector
)
existing_pod_names = [pod.metadata.name for pod in pods.items]
still_in_deletion_pods = set(existing_pod_names).intersection(
deleted_pod_names
)
if still_in_deletion_pods:
raise RuntimeError(
f"Pods are still in deletion process: {still_in_deletion_pods}"
)
if deleted_pod_names:
timeout = 180
logger.debug(
"Waiting for pods deletion",
timeout=timeout,
interval=self.wait_for_deletion_interval,
)
mlrun.utils.retry_until_successful(
self.wait_for_deletion_interval,
timeout,
logger,
True,
_verify_pods_removed,
)
def _wait_for_crds_underlying_pods_deletion(
self, deleted_crds: List[Dict], label_selector: str = None,
):
# we're using here the run identifier as the common ground to identify which pods are relevant to which CRD, so
# if they are not coupled we are not able to wait - simply return
# NOTE - there are surely smarter ways to do this, without depending on the run object, but as of writing this
# none of the runtimes using CRDs are like that, so not handling it now
if not self._are_resources_coupled_to_run_object():
return
def _verify_crds_underlying_pods_removed():
project_uid_crd_map = {}
for crd in deleted_crds:
project, uid = self._resolve_runtime_resource_run(crd)
if not uid or not project:
logger.warning(
"Could not resolve run uid from crd. Skipping waiting for pods deletion",
crd=crd,
)
continue
project_uid_crd_map.setdefault(project, {})[uid] = crd["metadata"][
"name"
]
still_in_deletion_crds_to_pod_names = {}
jobs_runtime_resources: mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput = self.list_resources(
"*",
label_selector=label_selector,
group_by=mlrun.api.schemas.ListRuntimeResourcesGroupByField.job,
)
for project, project_jobs in jobs_runtime_resources.items():
if project not in project_uid_crd_map:
continue
for job_uid, job_runtime_resources in jobs_runtime_resources[
project
].items():
if job_uid not in project_uid_crd_map[project]:
continue
if job_runtime_resources.pod_resources:
still_in_deletion_crds_to_pod_names[
project_uid_crd_map[project][job_uid]
] = [
pod_resource.name
for pod_resource in job_runtime_resources.pod_resources
]
if still_in_deletion_crds_to_pod_names:
raise RuntimeError(
f"CRD underlying pods are still in deletion process: {still_in_deletion_crds_to_pod_names}"
)
if deleted_crds:
timeout = 180
logger.debug(
"Waiting for CRDs underlying pods deletion",
timeout=timeout,
interval=self.wait_for_deletion_interval,
)
mlrun.utils.retry_until_successful(
self.wait_for_deletion_interval,
timeout,
logger,
True,
_verify_crds_underlying_pods_removed,
)
def _delete_pod_resources(
self,
db: DBInterface,
db_session: Session,
namespace: str,
label_selector: str = None,
force: bool = False,
grace_period: int = None,
) -> List[Dict]:
if grace_period is None:
grace_period = config.runtime_resources_deletion_grace_period
k8s_helper = get_k8s_helper()
pods = k8s_helper.v1api.list_namespaced_pod(
namespace, label_selector=label_selector
)
deleted_pods = []
for pod in pods.items:
pod_dict = pod.to_dict()
# best effort - don't let one failure in pod deletion cut the whole operation short
try:
(
in_terminal_state,
last_update,
run_state,
) = self._resolve_pod_status_info(db, db_session, pod_dict)
if not force:
if not in_terminal_state:
continue
# give some grace period if we have last update time
now = datetime.now(timezone.utc)
if (
last_update is not None
and last_update + timedelta(seconds=float(grace_period)) > now
):
continue
# if resources are tightly coupled to the run object - we want to perform some actions on the run object
# before deleting them
if self._are_resources_coupled_to_run_object():
try:
self._pre_deletion_runtime_resource_run_actions(
db, db_session, pod_dict, run_state
)
except Exception as exc:
# Don't prevent the deletion for failure in the pre deletion run actions
logger.warning(
"Failure in pod run pre-deletion actions. Continuing",
exc=repr(exc),
pod_name=pod.metadata.name,
)
self._delete_pod(namespace, pod)
deleted_pods.append(pod_dict)
except Exception as exc:
logger.warning(
f"Cleanup failed processing pod {pod.metadata.name}: {repr(exc)}. Continuing"
)
self._wait_for_pods_deletion(namespace, deleted_pods, label_selector)
return deleted_pods
def _delete_crd_resources(
self,
db: DBInterface,
db_session: Session,
namespace: str,
label_selector: str = None,
force: bool = False,
grace_period: int = None,
) -> List[Dict]:
if grace_period is None:
grace_period = config.runtime_resources_deletion_grace_period
k8s_helper = get_k8s_helper()
crd_group, crd_version, crd_plural = self._get_crd_info()
deleted_crds = []
try:
crd_objects = k8s_helper.crdapi.list_namespaced_custom_object(
crd_group,
crd_version,
namespace,
crd_plural,
label_selector=label_selector,
)
except ApiException as exc:
# ignore error if crd is not defined
if exc.status != 404:
raise
else:
for crd_object in crd_objects["items"]:
# best effort - don't let one failure in crd object deletion cut the whole operation short
try:
(
in_terminal_state,
last_update,
desired_run_state,
) = self._resolve_crd_object_status_info(db, db_session, crd_object)
if not force:
if not in_terminal_state:
continue
# give some grace period if we have last update time
now = datetime.now(timezone.utc)
if (
last_update is not None
and last_update + timedelta(seconds=float(grace_period))
> now
):
continue
# if resources are tightly coupled to the run object - we want to perform some actions on the run
# object before deleting them
if self._are_resources_coupled_to_run_object():
try:
self._pre_deletion_runtime_resource_run_actions(
db, db_session, crd_object, desired_run_state,
)
except Exception as exc:
# Don't prevent the deletion for failure in the pre deletion run actions
logger.warning(
"Failure in crd object run pre-deletion actions. Continuing",
exc=str(exc),
crd_object_name=crd_object["metadata"]["name"],
)
self._delete_crd(
namespace, crd_group, crd_version, crd_plural, crd_object
)
deleted_crds.append(crd_object)
except Exception:
exc = traceback.format_exc()
crd_object_name = crd_object["metadata"]["name"]
logger.warning(
f"Cleanup failed processing CRD object {crd_object_name}: {exc}. Continuing"
)
self._wait_for_crds_underlying_pods_deletion(deleted_crds, label_selector)
return deleted_crds
def _pre_deletion_runtime_resource_run_actions(
self,
db: DBInterface,
db_session: Session,
runtime_resource: Dict,
run_state: str,
):
project, uid = self._resolve_runtime_resource_run(runtime_resource)
# if cannot resolve related run nothing to do
if not uid:
if not self._expect_pods_without_uid():
logger.warning(
"Could not resolve run uid from runtime resource. Skipping pre-deletion actions",
runtime_resource=runtime_resource,
)
raise ValueError("Could not resolve run uid from runtime resource")
else:
return
logger.info(
"Performing pre-deletion actions before cleaning up runtime resources",
project=project,
uid=uid,
)
self._ensure_run_state(db, db_session, project, uid, run_state)
self._ensure_run_logs_collected(db, db_session, project, uid)
def _is_runtime_resource_run_in_terminal_state(
self, db: DBInterface, db_session: Session, runtime_resource: Dict,
) -> Tuple[bool, Optional[datetime]]:
"""
A runtime can have different underlying resources (like pods or CRDs) - to generalize we call it runtime
resource. This function verifies whether the Run object related to this runtime resource is in a terminal
state. This is useful in order to determine whether an object can be removed. For example, a kubejob's pod
might be in completed state, but we would like to verify that the run is completed as well, to ensure the logs
were collected before we remove the pod.
:returns: bool determining whether the run in terminal state, and the last update time if it exists
"""
project, uid = self._resolve_runtime_resource_run(runtime_resource)
# if no uid, assume in terminal state
if not uid:
return True, None
run = db.read_run(db_session, uid, project)
last_update = None
last_update_str = run.get("status", {}).get("last_update")
if last_update_str is not None:
last_update = datetime.fromisoformat(last_update_str)
if run.get("status", {}).get("state") not in RunStates.terminal_states():
return False, last_update
return True, last_update
def _list_runs_for_monitoring(
self, db: DBInterface, db_session: Session,
):
runs = db.list_runs(db_session, project="*")
project_run_uid_map = {}
run_with_missing_data = []
duplicated_runs = []
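# project_run_uid_map structure: {project_name: {run_uid: run_dict}}; runs with missing metadata
# or duplicated uids are collected separately and reported (once) below.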
for run in runs:
project = run.get("metadata", {}).get("project")
uid = run.get("metadata", {}).get("uid")
if not uid or not project:
run_with_missing_data.append(run.get("metadata", {}))
continue
current_run = project_run_uid_map.setdefault(project, {}).get(uid)
# sanity
if current_run:
duplicated_runs.append(
{
"monitored_run": current_run.get("metadata"),
"duplicated_run": run.get("metadata"),
}
)
continue
project_run_uid_map[project][uid] = run
# If there are duplications or runs with missing data they probably won't get fixed on their own.
# Monitoring runs periodically and we don't want to log every problem we find (that would spam the log),
# so we aggregate the problems and log them only once per aggregation
if duplicated_runs:
logger.warning(
"Found duplicated runs (same uid). Heuristically monitoring the first one found",
duplicated_runs=duplicated_runs,
)
if run_with_missing_data:
logger.warning(
"Found runs with missing data. They will not be monitored",
run_with_missing_data=run_with_missing_data,
)
return project_run_uid_map
def _monitor_runtime_resource(
self,
db: DBInterface,
db_session: Session,
project_run_uid_map: Dict,
runtime_resource: Dict,
runtime_resource_is_crd: bool,
namespace: str,
):
project, uid = self._resolve_runtime_resource_run(runtime_resource)
if not project or not uid:
# Currently any build pod won't have UID and therefore will cause this log message to be printed which
# spams the log
# TODO: uncomment the log message when builder become a kind / starts having a UID
# logger.warning(
# "Could not resolve run project or uid from runtime resource, can not monitor run. Continuing",
# project=project,
# uid=uid,
# runtime_resource_name=runtime_resource["metadata"]["name"],
# namespace=namespace,
# )
return
run = project_run_uid_map.get(project, {}).get(uid)
if runtime_resource_is_crd:
(_, _, run_state,) = self._resolve_crd_object_status_info(
db, db_session, runtime_resource
)
else:
(_, _, run_state,) = self._resolve_pod_status_info(
db, db_session, runtime_resource
)
self._update_ui_url(db, db_session, project, uid, runtime_resource, run)
_, updated_run_state = self._ensure_run_state(
db, db_session, project, uid, run_state, run, search_run=False,
)
if updated_run_state in RunStates.terminal_states():
self._ensure_run_logs_collected(db, db_session, project, uid)
def _build_list_resources_response(
self,
pod_resources: List[mlrun.api.schemas.RuntimeResource] = None,
crd_resources: List[mlrun.api.schemas.RuntimeResource] = None,
group_by: Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
) -> Union[
mlrun.api.schemas.RuntimeResources,
mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
]:
if crd_resources is None:
crd_resources = []
if pod_resources is None:
pod_resources = []
if group_by is None:
return mlrun.api.schemas.RuntimeResources(
crd_resources=crd_resources, pod_resources=pod_resources
)
else:
if group_by == mlrun.api.schemas.ListRuntimeResourcesGroupByField.job:
return self._build_grouped_by_job_list_resources_response(
pod_resources, crd_resources
)
elif group_by == mlrun.api.schemas.ListRuntimeResourcesGroupByField.project:
return self._build_grouped_by_project_list_resources_response(
pod_resources, crd_resources
)
else:
raise NotImplementedError(
f"Provided group by field is not supported. group_by={group_by}"
)
def _build_grouped_by_project_list_resources_response(
self,
pod_resources: List[mlrun.api.schemas.RuntimeResource] = None,
crd_resources: List[mlrun.api.schemas.RuntimeResource] = None,
) -> mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput:
resources = {}
for pod_resource in pod_resources:
self._add_resource_to_grouped_by_project_resources_response(
resources, "pod_resources", pod_resource
)
for crd_resource in crd_resources:
self._add_resource_to_grouped_by_project_resources_response(
resources, "crd_resources", crd_resource
)
return resources
def _build_grouped_by_job_list_resources_response(
self,
pod_resources: List[mlrun.api.schemas.RuntimeResource] = None,
crd_resources: List[mlrun.api.schemas.RuntimeResource] = None,
) -> mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput:
resources = {}
for pod_resource in pod_resources:
self._add_resource_to_grouped_by_job_resources_response(
resources, "pod_resources", pod_resource
)
for crd_resource in crd_resources:
self._add_resource_to_grouped_by_job_resources_response(
resources, "crd_resources", crd_resource
)
return resources
def _add_resource_to_grouped_by_project_resources_response(
self,
resources: mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
resource_field_name: str,
resource: mlrun.api.schemas.RuntimeResource,
):
if "mlrun/class" in resource.labels:
project = resource.labels.get("mlrun/project", "")
mlrun_class = resource.labels["mlrun/class"]
kind = self._resolve_kind_from_class(mlrun_class)
self._add_resource_to_grouped_by_field_resources_response(
project, kind, resources, resource_field_name, resource
)
def _add_resource_to_grouped_by_job_resources_response(
self,
resources: mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
resource_field_name: str,
resource: mlrun.api.schemas.RuntimeResource,
):
if "mlrun/uid" in resource.labels:
project = resource.labels.get("mlrun/project", config.default_project)
uid = resource.labels["mlrun/uid"]
self._add_resource_to_grouped_by_field_resources_response(
project, uid, resources, resource_field_name, resource
)
@staticmethod
def _add_resource_to_grouped_by_field_resources_response(
first_field_value: str,
second_field_value: str,
resources: mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
resource_field_name: str,
resource: mlrun.api.schemas.RuntimeResource,
):
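# Generic helper building a two-level grouping: {first_field_value: {second_field_value: RuntimeResources}},
# appending the resource to the requested list field ("pod_resources" or "crd_resources").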
if first_field_value not in resources:
resources[first_field_value] = {}
if second_field_value not in resources[first_field_value]:
resources[first_field_value][
second_field_value
] = mlrun.api.schemas.RuntimeResources(pod_resources=[], crd_resources=[])
if not getattr(
resources[first_field_value][second_field_value], resource_field_name
):
setattr(
resources[first_field_value][second_field_value],
resource_field_name,
[],
)
getattr(
resources[first_field_value][second_field_value], resource_field_name
).append(resource)
@staticmethod
def _resolve_kind_from_class(mlrun_class: str) -> str:
class_to_kind_map = {}
for kind in mlrun.runtimes.RuntimeKinds.runtime_with_handlers():
runtime_handler = mlrun.runtimes.get_runtime_handler(kind)
class_values = runtime_handler._get_possible_mlrun_class_label_values()
for value in class_values:
class_to_kind_map[value] = kind
return class_to_kind_map[mlrun_class]
@staticmethod
def _get_run_label_selector(project: str, run_uid: str):
return f"mlrun/project={project},mlrun/uid={run_uid}"
@staticmethod
def _ensure_run_logs_collected(
db: DBInterface, db_session: Session, project: str, uid: str
):
# import here to avoid circular imports
import mlrun.api.crud as crud
log_file_exists = crud.Logs().log_file_exists(project, uid)
if not log_file_exists:
_, logs_from_k8s = crud.Logs().get_logs(
db_session, project, uid, source=LogSources.K8S
)
if logs_from_k8s:
logger.info("Storing run logs", project=project, uid=uid)
crud.Logs().store_log(logs_from_k8s, project, uid, append=False)
@staticmethod
def _ensure_run_state(
db: DBInterface,
db_session: Session,
project: str,
uid: str,
run_state: str,
run: Dict = None,
search_run: bool = True,
) -> Tuple[bool, str]:
if run is None:
run = {}
if search_run:
try:
run = db.read_run(db_session, uid, project)
except mlrun.errors.MLRunNotFoundError:
run = {}
if not run:
logger.warning(
"Run not found. A new run will be created",
project=project,
uid=uid,
desired_run_state=run_state,
search_run=search_run,
)
run = {"metadata": {"project": project, "uid": uid}}
db_run_state = run.get("status", {}).get("state")
if db_run_state:
if db_run_state == run_state:
return False, run_state
# if the current run state is terminal and different than the desired - log
if db_run_state in RunStates.terminal_states():
# This can happen when the SDK running in the user's Run updates the Run's state to terminal, but
# before it exits, when the runtime resource is still running, the API monitoring (here) is executed
if run_state not in RunStates.terminal_states():
now = datetime.now(timezone.utc)
last_update_str = run.get("status", {}).get("last_update")
if last_update_str is not None:
last_update = datetime.fromisoformat(last_update_str)
debounce_period = config.runs_monitoring_interval
if last_update > now - timedelta(
seconds=float(debounce_period)
):
logger.warning(
"Monitoring found non-terminal state on runtime resource but record has recently "
"updated to terminal state. Debouncing",
project=project,
uid=uid,
db_run_state=db_run_state,
run_state=run_state,
last_update=last_update,
now=now,
debounce_period=debounce_period,
)
return False, run_state
logger.warning(
"Run record has terminal state but monitoring found different state on runtime resource. Changing",
project=project,
uid=uid,
db_run_state=db_run_state,
run_state=run_state,
)
logger.info("Updating run state", run_state=run_state)
run.setdefault("status", {})["state"] = run_state
run.setdefault("status", {})["last_update"] = now_date().isoformat()
db.store_run(db_session, run, uid, project)
return True, run_state
@staticmethod
def _resolve_runtime_resource_run(runtime_resource: Dict) -> Tuple[str, str]:
project = (
runtime_resource.get("metadata", {}).get("labels", {}).get("mlrun/project")
)
if not project:
project = config.default_project
uid = runtime_resource.get("metadata", {}).get("labels", {}).get("mlrun/uid")
return project, uid
@staticmethod
def _delete_crd(namespace, crd_group, crd_version, crd_plural, crd_object):
k8s_helper = get_k8s_helper()
name = crd_object["metadata"]["name"]
try:
k8s_helper.crdapi.delete_namespaced_custom_object(
crd_group, crd_version, namespace, crd_plural, name,
)
logger.info(
"Deleted crd object",
name=name,
namespace=namespace,
crd_plural=crd_plural,
)
except ApiException as exc:
# ignore error if crd object is already removed
if exc.status != 404:
raise
@staticmethod
def _delete_pod(namespace, pod):
k8s_helper = get_k8s_helper()
try:
k8s_helper.v1api.delete_namespaced_pod(pod.metadata.name, namespace)
logger.info("Deleted pod", pod=pod.metadata.name)
except ApiException as exc:
# ignore error if pod is already removed
if exc.status != 404:
raise
@staticmethod
def _build_pod_resources(pods) -> List[mlrun.api.schemas.RuntimeResource]:
pod_resources = []
for pod in pods:
pod_resources.append(
mlrun.api.schemas.RuntimeResource(
name=pod["metadata"]["name"],
labels=pod["metadata"]["labels"],
status=pod["status"],
)
)
return pod_resources
@staticmethod
def _build_crd_resources(custom_objects) -> List[mlrun.api.schemas.RuntimeResource]:
crd_resources = []
for custom_object in custom_objects:
crd_resources.append(
mlrun.api.schemas.RuntimeResource(
name=custom_object["metadata"]["name"],
labels=custom_object["metadata"]["labels"],
status=custom_object.get("status", {}),
)
)
return crd_resources
|
[] |
[] |
[
"MLRUN_AUTH_SESSION\"\n ",
"V3IO_ACCESS_KEY"
] |
[]
|
["MLRUN_AUTH_SESSION\"\n ", "V3IO_ACCESS_KEY"]
|
python
| 2 | 0 | |
src/services/gcp/pkg/stub/handler.go
|
package stub
import (
"bytes"
"crypto/tls"
"crypto/x509"
b64 "encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"mime/multipart"
"net/http"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
uuid "github.com/satori/go.uuid"
"github.com/sap/infrabox/src/services/gcp/pkg/apis/gcp/v1alpha1"
"github.com/sap/infrabox/src/services/gcp/pkg/stub/cleaner"
goerrors "errors"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/operator-framework/operator-sdk/pkg/sdk/action"
"github.com/operator-framework/operator-sdk/pkg/sdk/handler"
"github.com/operator-framework/operator-sdk/pkg/sdk/types"
"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/mholt/archiver"
)
const adminSAName = "admin"
type MasterAuth struct {
ClientCertificate string
ClientKey string
ClusterCaCertificate string
Username string
Password string
Token string
}
type RemoteCluster struct {
Name string
Status string
Zone string
Endpoint string
MasterAuth MasterAuth
}
func NewHandler() handler.Handler {
return &Handler{}
}
type Handler struct{}
func setClusterName(cr *v1alpha1.GKECluster, log *logrus.Entry) error {
finalizers := cr.GetFinalizers()
if len(finalizers) == 0 {
cr.SetFinalizers([]string{"gcp.service.infrabox.net"})
cr.Status.Status = "pending"
u := uuid.NewV4()
cr.Status.ClusterName = "ib-" + u.String()
return action.Update(cr)
}
return nil
}
func createCluster(cr *v1alpha1.GKECluster, log *logrus.Entry) (*v1alpha1.GKEClusterStatus, error) {
limit := os.Getenv("MAX_NUM_CLUSTERS")
status := cr.Status
if limit != "" {
gkeclusters, err := getRemoteClusters(log)
if err != nil && !errors.IsNotFound(err) {
err = fmt.Errorf("could not get GKE Clusters: %v", err)
log.Error(err)
return nil, err
}
l, err := strconv.Atoi(limit)
if err != nil {
log.Errorf("Failed to parse cluster limit: %v", err)
return nil, err
}
if len(gkeclusters) >= l {
status.Status = "pending"
status.Message = "Cluster limit reached, waiting..."
log.Debug(status.Message)
return &status, nil
}
}
log.Infof("Create GKE cluster %s", cr.Status.ClusterName)
args := []string{"container", "clusters",
"create", cr.Status.ClusterName,
"--async",
"--enable-autorepair",
"--scopes=gke-default,storage-rw",
"--zone", cr.Spec.Zone,
}
if cr.Spec.DiskSize != 0 {
args = append(args, "--disk-size")
args = append(args, strconv.Itoa(int(cr.Spec.DiskSize)))
}
if cr.Spec.MachineType != "" {
args = append(args, "--machine-type")
args = append(args, cr.Spec.MachineType)
}
if cr.Spec.EnableNetworkPolicy {
args = append(args, "--enable-network-policy")
}
if !cr.Spec.DisableLegacyAuthorization {
args = append(args, "--enable-legacy-authorization")
}
if cr.Spec.EnablePodSecurityPolicy {
args = append(args, "--enable-pod-security-policy")
args = append([]string{"beta"}, args...)
}
if cr.Spec.NumNodes != 0 {
args = append(args, "--num-nodes")
args = append(args, strconv.Itoa(int(cr.Spec.NumNodes)))
}
if cr.Spec.Preemptible {
args = append(args, "--preemptible")
}
if cr.Spec.EnableAutoscaling {
args = append(args, "--enable-autoscaling")
if cr.Spec.MaxNodes != 0 {
args = append(args, "--max-nodes")
args = append(args, strconv.Itoa(int(cr.Spec.MaxNodes)))
}
if cr.Spec.MinNodes != 0 {
args = append(args, "--min-nodes")
args = append(args, strconv.Itoa(int(cr.Spec.MinNodes)))
}
}
if cr.Spec.ClusterVersion != "" {
// find out the exact cluster version
version, channel, err := getExactClusterVersion(cr, log)
if err != nil {
return nil, err
}
if channel == "" {
channel = "stable"
}
args = append(args, "--cluster-version", version)
args = append(args, "--release-channel", channel)
}
args = append(args, "--enable-ip-alias")
args = append(args, "--create-subnetwork", "")
if cr.Spec.ClusterCidr == "" {
cr.Spec.ClusterCidr = "/18"
args = append(args, "--cluster-ipv4-cidr", cr.Spec.ClusterCidr)
}
if cr.Spec.ServiceCidr == "" {
cr.Spec.ServiceCidr = "/18"
args = append(args, "--services-ipv4-cidr", cr.Spec.ServiceCidr)
}
cmd := exec.Command("gcloud" , args...)
out, err := cmd.CombinedOutput()
if err != nil {
err = fmt.Errorf("failed to create GKE Cluster: %v, %s", err, out)
log.Error(err)
return nil, err
}
status.Status = "pending"
status.Message = "Cluster is being created"
return &status, nil
}
func syncGKECluster(cr *v1alpha1.GKECluster, log *logrus.Entry) (*v1alpha1.GKEClusterStatus, error) {
if cr.Status.Status == "ready" || cr.Status.Status == "error" {
return &cr.Status, nil
}
if err := setClusterName(cr, log); err != nil {
log.Errorf("Failed to set finalizers: %v", err)
return nil, err
}
// Get the GKE Cluster
gkecluster, err := getRemoteCluster(cr.Status.ClusterName, log)
if err != nil && !errors.IsNotFound(err) {
log.Errorf("Could not get GKE Cluster: %v", err)
return nil, err
}
if gkecluster == nil {
return createCluster(cr, log)
} else {
if gkecluster.Status == "RUNNING" {
err = injectAdminServiceAccount(gkecluster, log)
if err != nil {
log.Errorf("Failed to inject admin service account: %v", err)
return nil, err
}
err = injectCollector(gkecluster, log)
if err != nil {
log.Errorf("Failed to inject collector: %v", err)
return nil, err
}
err = action.Create(newSecret(cr, gkecluster))
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create secret: %v", err)
return nil, err
}
log.Infof("GKE cluster %s is ready", cr.Status.ClusterName)
status := cr.Status
status.Status = "ready"
status.Message = "Cluster ready"
return &status, nil
}
if gkecluster.Status == "ERROR" {
log.Errorf("Error creating cluster %s", cr.Status.ClusterName)
return nil, goerrors.New("error creating GKE cluster")
}
}
return &cr.Status, nil
}
func getAdminToken(gkecluster *RemoteCluster) (string, error) {
client, err := newRemoteClusterSDK(gkecluster)
if err != nil {
return "", fmt.Errorf("error creating remote cluster client: %s, %v", gkecluster.Name, err)
}
c, err := kubernetes.NewForConfig(client.kubeConfig)
if err != nil {
return "", fmt.Errorf("error getting k8s client: %s, %v", gkecluster.Name, err)
}
sa, err := c.CoreV1().ServiceAccounts("kube-system").Get(adminSAName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("error getting admin service account: %s, %v", gkecluster.Name, err)
}
secret, err := c.CoreV1().Secrets("kube-system").Get(sa.Secrets[0].Name, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("error getting admin sa secret: %s, %v", gkecluster.Name, err)
}
token := secret.Data["token"]
return string(token), nil
}
func injectAdminServiceAccount(gkecluster *RemoteCluster, log *logrus.Entry) error {
client, err := newRemoteClusterSDK(gkecluster)
if err != nil {
err = fmt.Errorf("failed to create remote cluster client: %v", err)
log.Error(err)
return err
}
err = client.Create(newAdminServiceAccount(), log)
if err != nil && !errors.IsAlreadyExists(err) {
err = fmt.Errorf("failed to create admin service account : %v", err)
log.Error(err)
return err
}
err = client.Create(newAdminCRB(), log)
if err != nil && !errors.IsAlreadyExists(err) {
err = fmt.Errorf("failed to create admin service account : %v", err)
log.Error(err)
return err
}
token, err := getAdminToken(gkecluster)
if err != nil {
err = fmt.Errorf("error getting admin token: %s", gkecluster.Name)
log.Error(err)
return err
}
gkecluster.MasterAuth.Token = token
return nil
}
func newAdminServiceAccount() *v1.ServiceAccount {
return &v1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
Kind: "ServiceAccount",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: adminSAName,
Namespace: "kube-system",
},
}
}
func newAdminCRB() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterRoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "admin-crb",
Namespace: "kube-system",
},
Subjects: []rbacv1.Subject{{
Kind: "ServiceAccount",
Name: "admin",
Namespace: "kube-system",
}},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: "cluster-admin",
APIGroup: "rbac.authorization.k8s.io",
},
}
}
func getGkeKubeConfig(gkecluster *RemoteCluster, log *logrus.Entry) error {
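// Fetches cluster credentials via "gcloud container clusters get-credentials" and caches the
// resulting kubeconfig at /tmp/kubeconfig-<cluster name>; if the file already exists it is reused.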
kubeConfigPath := "/tmp/kubeconfig-" + gkecluster.Name
if _, err := os.Stat(kubeConfigPath); !os.IsNotExist(err) {
return nil
}
cmd := exec.Command("gcloud", "container", "clusters", "get-credentials", gkecluster.Name,
"--zone", gkecluster.Zone)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "KUBECONFIG=" + kubeConfigPath)
out, err := cmd.CombinedOutput()
if err != nil {
log.Errorf("Failed to get kubeconfig for cluster: %s, %s, %v", gkecluster.Name, out, err)
os.Remove(kubeConfigPath)
return err
}
return nil
}
func deleteRemoteCluster(cr *v1alpha1.GKECluster, log *logrus.Entry) error {
log.Infof("Deleting cluster %s", cr.Status.ClusterName)
cmd := exec.Command("gcloud", "-q", "container", "clusters", "delete", cr.Status.ClusterName, "--async", "--zone", cr.Spec.Zone)
out, err := cmd.CombinedOutput()
os.Remove("/tmp/kubeconfig-" + cr.ClusterName)
if err != nil {
log.Errorf("Failed to delete cluster: %v", err)
log.Error(string(out))
}
return err
}
func collectLogs(c *RemoteCluster, cr *v1alpha1.GKECluster, log *logrus.Entry, started chan int) {
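// Collects pod logs from the remote cluster into a temporary directory. The started channel is
// closed as soon as the log directory is (or fails to be) set up so the caller can continue;
// retrieval then runs in the background with a 5 minute timeout, and the directory is removed afterwards.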
logPath := path.Join("/tmp", cr.Status.ClusterName)
err := os.Mkdir(logPath, os.ModePerm)
if err != nil {
log.Warningf("Failed to create pod logs dir, won't collect pod logs %v", err)
close(started)
return
}
close(started)
done := make(chan error)
go retrieveLogs(cr, c, log, logPath, done)
defer func() {
if _, err := os.Stat(logPath); !os.IsNotExist(err) {
_ = os.RemoveAll(logPath)
}
}()
for {
select {
case <-time.After(time.Minute * 5):
log.Infof("timeout collecting logs for %s", cr.Status.ClusterName)
return
case <-done:
log.Infof("finished collecting logs for %s", cr.Status.ClusterName)
return
}
}
}
func cleanUpCrd(cr *v1alpha1.GKECluster, log *logrus.Entry) error {
secretName := cr.ObjectMeta.Labels["service.infrabox.net/secret-name"]
secret := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: cr.Namespace,
},
}
err := action.Delete(&secret)
if err != nil && !errors.IsNotFound(err) {
log.Errorf("Failed to delete secret: %v", err)
return err
}
cr.SetFinalizers([]string{})
err = action.Update(cr)
if err != nil {
log.Errorf("Failed to remove finalizers: %v", err)
return err
}
err = action.Delete(cr)
if err != nil && !errors.IsNotFound(err) {
log.Errorf("Failed to delete cr: %v", err)
return err
}
return nil
}
func checkTimeout(cr *v1alpha1.GKECluster, log *logrus.Entry) error {
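// Gives cleanup a bounded amount of time: if FirstCleanedAt cannot be parsed it is reset; otherwise,
// once more than 5 minutes have passed since the first cleanup attempt, the CR status message is
// switched to "deleting cluster" so deletion proceeds on the next sync.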
t, err := time.Parse(time.RFC1123, cr.Status.FirstCleanedAt)
if err != nil {
log.Debugf("couldn't parse stored timestamp ('%s', err: %s) => reset it", cr.Status.FirstCleanedAt, err.Error())
if err = setAndUpdateFirstCleaned(cr, log); err != nil {
log.Errorf("couldn't set first cleaned timestamp: %v", err)
return err
}
} else {
waitDur := time.Minute * 5
sinceFirstCleaned := time.Since(t).Truncate(time.Second)
if sinceFirstCleaned < waitDur {
log.Debugf("timestamp FirstCleaned: %s => %s since then. Wait until %s have elapsed since first cleaning", cr.Status.FirstCleanedAt, sinceFirstCleaned, waitDur)
} else {
log.Debugf("timestamp FirstCleaned: %s => %s since then. Proceed with deleting cluster", cr.Status.FirstCleanedAt, sinceFirstCleaned)
cr.Status.Message = "deleting cluster"
if err = action.Update(cr); err != nil {
log.Errorf("Failed to update status: %v", err)
return err
}
}
}
return nil
}
func deleteGKECluster(cr *v1alpha1.GKECluster, log *logrus.Entry) error {
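// Drives cluster teardown as a small state machine via cr.Status.Message:
// "collecting logs" -> "cleaning cluster" -> "deleting cluster", and finally removes the secret and
// the CRD instance once the remote cluster no longer exists.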
// Get the GKE Cluster
gkecluster, err := getRemoteCluster(cr.Status.ClusterName, log)
if err != nil && !errors.IsNotFound(err) {
log.Errorf("Failed to get GKE Cluster: %v", err)
return err
}
if gkecluster == nil {
if err = cleanUpCrd(cr, log); err != nil {
log.Errorf("Failed to delete GKECluster CRD")
}
log.Infof("GKE cluster %s removed", cr.Status.ClusterName)
return err
}
if cr.Status.Status != "deleting" {
cr.Status.Status = "deleting"
// Don't collect logs for abnormal clusters
if gkecluster.Status != "RUNNING" {
cr.Status.Message = "cleaning cluster"
if err = action.Update(cr); err != nil {
log.Errorf("Failed to update status: %v", err)
return err
}
return nil
}
cr.Status.Message = "collecting logs"
if err := setAndUpdateFirstCleaned(cr, log); err != nil {
return err
}
log.Infof("Start clean up GKE cluster %s", cr.Status.ClusterName)
started := make(chan int)
go collectLogs(gkecluster, cr, log, started)
<- started
}
switch cr.Status.Message {
case "collecting logs":
if _, err := os.Stat(path.Join("/tmp", cr.Status.ClusterName)); os.IsNotExist(err) {
cr.Status.Message = "cleaning cluster"
err := action.Update(cr)
if err != nil {
log.Errorf("Failed to update status: %v", err)
return err
}
}
case "cleaning cluster":
isClean, err := cleanupK8s(gkecluster, log)
if err != nil {
_ = checkTimeout(cr, log)
return err
} else if !isClean { // don't proceed if cluster isn't clean
_ = checkTimeout(cr, log)
return nil
}
cr.Status.Message = "deleting cluster"
if err = action.Update(cr); err != nil {
log.Errorf("Failed to update status: %v", err)
return err
}
case "deleting cluster":
// cluster is being deleted
if gkecluster.Status == "STOPPING" {
return nil
}
if err = deleteRemoteCluster(cr, log); err != nil {
log.Errorf("Error delete gke cluster %s", cr.Status.ClusterName)
return err
}
}
return nil
}
func setAndUpdateFirstCleaned(cr *v1alpha1.GKECluster, log *logrus.Entry) error {
cr.Status.FirstCleanedAt = time.Now().Format(time.RFC1123)
log.Debug("set first-cleaned timestamp to ", cr.Status.FirstCleanedAt)
err := action.Update(cr)
if err != nil {
log.Errorf("Failed to update status: %v", err)
}
return err
}
func cleanupK8s(cluster *RemoteCluster, log *logrus.Entry) (bool, error) {
remoteClusterSdk, err := newRemoteClusterSDK(cluster)
if err != nil {
return false, err
}
cs, err := kubernetes.NewForConfig(remoteClusterSdk.kubeConfig)
if err != nil {
log.Errorf("Failed to create clientset from given kubeconfig: %v", err)
return false, err
}
isClean, err := cleaner.NewK8sCleaner(cs, log).Cleanup()
return isClean, err
}
func (h *Handler) Handle(ctx types.Context, event types.Event) error {
switch o := event.Object.(type) {
case *v1alpha1.GKECluster:
ns := o
if event.Deleted {
return nil
}
log := logrus.WithFields(logrus.Fields{
"namespace": ns.Namespace,
"name": ns.Name,
})
delTimestamp := ns.GetDeletionTimestamp()
if delTimestamp != nil {
return deleteGKECluster(ns, log)
} else {
status, err := syncGKECluster(ns, log)
if err != nil {
ns.Status.Status = "error"
ns.Status.Message = err.Error()
err = action.Update(ns)
return err
} else {
if ns.Status.Status != status.Status || ns.Status.Message != status.Message {
ns.Status = *status
err = action.Update(ns)
return err
}
}
}
}
return nil
}
func getLabels(cr *v1alpha1.GKECluster) map[string]string {
return map[string]string{}
}
type Channel struct {
Channel string `json:"channel"`
DefaultVersion string `json:"defaultVersion"`
ValidVersions []string `json:"validVersions"`
}
type ServerConfig struct {
ValidMasterVersions []string `json:"validMasterVersions"`
ValidNodeVersions []string `json:"validNodeVersions"`
Channels []Channel `json:"channels"`
}
func getExactClusterVersion(cr *v1alpha1.GKECluster, log *logrus.Entry) (string, string, error) {
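// Resolves the user-supplied version (e.g. "1.18" or "latest") to an exact version available in the
// zone by querying "gcloud container get-server-config", returning the version together with its
// release channel.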
cmd := exec.Command("gcloud", "container", "get-server-config",
"--format", "json",
"--zone", cr.Spec.Zone)
out, err := cmd.Output()
if err != nil {
log.Errorf("Could not get server config: %v", err)
return "", "", err
}
var config ServerConfig
err = json.Unmarshal(out, &config)
if err != nil {
log.Errorf("Could not parse cluster config: %v", err)
return "", "", err
}
if cr.Spec.ClusterVersion == "latest" {
version := "" //Store the latest available version
for _, c := range config.Channels {
if c.Channel == "RAPID" {
for i, v := range c.ValidVersions {
if i == 0 {
version = v
} else {
sliceVersion := strings.Split(version, ".")
sliceV := strings.Split(v, ".") // compare the minor version first, then the gke patch number
if (sliceVersion[1] < sliceV[1]) || (sliceVersion[1] == sliceV[1] && sliceVersion[3] < sliceV[3]) {
version = v
}
}
}
return version, strings.ToLower(c.Channel), nil
}
}
}
for _, c := range config.Channels {
for _, v := range c.ValidVersions {
if strings.HasPrefix(v, cr.Spec.ClusterVersion) {
return v, strings.ToLower(c.Channel), nil
}
}
}
return "", "" , fmt.Errorf("Could not find a valid cluster version match for %v", cr.Spec.ClusterVersion)
}
func getRemoteCluster(name string, log *logrus.Entry) (*RemoteCluster, error) {
var out []byte
var err error
MAX_RETRY := 3
for i := 0; i < MAX_RETRY; i++ {
cmd := exec.Command("gcloud", "container", "clusters", "list",
"--filter", "name="+name, "--format", "json")
out, err = cmd.CombinedOutput()
if err == nil {
var gkeclusters []RemoteCluster
err = json.Unmarshal(out, &gkeclusters)
if err == nil {
if len(gkeclusters) == 0 {
return nil, nil
}
res := &gkeclusters[0]
if res.Status == "RUNNING" {
if err := getGkeKubeConfig(res, log); err != nil {
return nil, err
}
token, err := getAdminToken(res)
if err == nil {
res.MasterAuth.Token = token
}
}
return res, nil
} else {
log.Warningf("could not parse cluster list: %s, %v, will retry in 10s", out, err)
time.Sleep(time.Duration(int(time.Second) * 10))
}
} else {
log.Warningf("Could not list clusters, will retry in 10s")
time.Sleep(time.Duration(int(time.Second) * 10))
}
}
err = fmt.Errorf("Had tried 3 times but still get error: %v", err)
log.Error(err)
return nil, err
}
func getRemoteClusters(log *logrus.Entry) ([]RemoteCluster, error) {
var out []byte
var err error
MAX_RETRY := 3
for i := 0; i < MAX_RETRY; i++ {
cmd := exec.Command("gcloud", "container", "clusters", "list",
"--format", "json")
out, err = cmd.Output()
if err == nil {
var gkeclusters []RemoteCluster
err = json.Unmarshal(out, &gkeclusters)
if err == nil {
return gkeclusters, nil
} else {
log.Errorf("Could not parse cluster list: %v , will retry in 10s", err)
time.Sleep(time.Duration(int(time.Second) * 10))
}
} else {
log.Warningf("Could not list clusters, will retry in 10s")
time.Sleep(time.Duration(int(time.Second) * 10))
}
}
log.Errorf("Had tried 3 times but still get error: %v", err)
return nil, err
}
func GcLoop(maxAge string, interval int, log *logrus.Entry) {
log.Infof("Entering gc loop")
for {
go cleanUpClusters(maxAge, log)
time.Sleep(time.Duration(int(time.Second) * interval))
}
}
func cleanUpClusters(maxAge string, log *logrus.Entry) {
log.Infof("Starting GC")
clusters, err := getOutdatedClusters(maxAge, log)
if err != nil {
log.Error("Error get outdated clusters")
return
}
log.Debug("Start clean up clusters")
for _, cluster := range clusters {
if cluster.Status == "STOPPING" {
continue
}
if _, err := cleanupK8s(&cluster, log); err != nil {
log.Errorf("Error clean up cluster: %v", err)
}
log.Infof("Deleting cluster %s", cluster.Name)
cmd := exec.Command("gcloud", "-q", "container", "clusters", "delete", cluster.Name, "--async", "--zone", cluster.Zone)
out, err := cmd.CombinedOutput()
if err != nil {
log.Errorf("Failed to delete cluster: %v", err)
log.Error(string(out))
}
}
log.Info("GC done")
}
func getOutdatedClusters(maxAge string, log *logrus.Entry) ([]RemoteCluster, error) {
cmd := exec.Command("bash", "-c", "gcloud container clusters list " +
"--filter='createTime<-P" + maxAge + " AND name:ib-*' --format json")
out, err := cmd.Output()
if err != nil {
log.Errorf("Could not list outdated clusters: %v, %v", err, out)
return nil, err
}
var gkeclusters []RemoteCluster
err = json.Unmarshal(out, &gkeclusters)
if err != nil {
log.Errorf("Could not parse cluster list: %v", err)
return nil, err
}
return gkeclusters, nil
}
func generateKubeconfig(c *RemoteCluster) []byte {
caCrt, _ := b64.StdEncoding.DecodeString(c.MasterAuth.ClusterCaCertificate)
clusters := make(map[string]*clientcmdapi.Cluster)
clusters[c.Name] = &clientcmdapi.Cluster{
Server: "https://" + c.Endpoint,
CertificateAuthorityData: caCrt,
}
contexts := make(map[string]*clientcmdapi.Context)
contexts["default-context"] = &clientcmdapi.Context{
Cluster: c.Name,
AuthInfo: "admin",
}
authinfos := make(map[string]*clientcmdapi.AuthInfo)
authinfos["admin"] = &clientcmdapi.AuthInfo{
Token: c.MasterAuth.Token,
}
clientConfig := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Clusters: clusters,
Contexts: contexts,
CurrentContext: "default-context",
AuthInfos: authinfos,
}
kc, _ := clientcmd.Write(clientConfig)
return kc
}
func newSecret(cluster *v1alpha1.GKECluster, gke *RemoteCluster) *v1.Secret {
caCrt, _ := b64.StdEncoding.DecodeString(gke.MasterAuth.ClusterCaCertificate)
clientKey, _ := b64.StdEncoding.DecodeString(gke.MasterAuth.ClientKey)
clientCrt, _ := b64.StdEncoding.DecodeString(gke.MasterAuth.ClientCertificate)
secretName := cluster.ObjectMeta.Labels["service.infrabox.net/secret-name"]
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cluster, schema.GroupVersionKind{
Group: v1alpha1.SchemeGroupVersion.Group,
Version: v1alpha1.SchemeGroupVersion.Version,
Kind: "Cluster",
}),
},
},
Type: "Opaque",
Data: map[string][]byte{
"ca.crt": []byte(caCrt),
"client.key": []byte(clientKey),
"client.crt": []byte(clientCrt),
"username": []byte(gke.MasterAuth.Username),
"password": []byte(gke.MasterAuth.Password),
"endpoint": []byte("https://" + gke.Endpoint),
"token": []byte(gke.MasterAuth.Token),
"kubeconfig": generateKubeconfig(gke),
},
}
}
func doCollectorRequest(cluster *RemoteCluster, log *logrus.Entry, endpoint string) (*[]byte, error) {
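// Performs an authenticated GET against the in-cluster collector API, proxied through the Kubernetes
// apiserver service proxy path, trusting the cluster CA and using the admin bearer token.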
caCrt, _ := b64.StdEncoding.DecodeString(cluster.MasterAuth.ClusterCaCertificate)
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCrt)
tlsConfig := &tls.Config{
RootCAs: caCertPool,
}
tlsConfig.BuildNameToCertificate()
transport := &http.Transport{TLSClientConfig: tlsConfig}
client := &http.Client{Transport: transport}
req, err := http.NewRequest("GET", "https://"+cluster.Endpoint+"/api/v1/namespaces/infrabox-collector/services/infrabox-collector-api:80/proxy"+endpoint, nil)
if err != nil {
log.Errorf("Failed to create new request: %v", err)
return nil, err
}
req.Header.Add("Authorization", "Bearer " + cluster.MasterAuth.Token)
resp, err := client.Do(req)
if err != nil {
log.Errorf("Failed to GET remote pod list: %v", err)
return nil, err
}
bodyText, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Errorf("Failed to read response body: %v", err)
return nil, err
}
if resp.StatusCode != 200 {
return &bodyText, goerrors.New(string(bodyText))
}
return &bodyText, nil
}
func uploadToArchive(cr *v1alpha1.GKECluster, log *logrus.Entry, data *[]byte, filename string) error {
annotations := cr.GetAnnotations()
root_url, _ := annotations["infrabox.net/root-url"]
job_token, _ := annotations["infrabox.net/job-token"]
body := new(bytes.Buffer)
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(filename, filename)
if err != nil {
log.Warningf("Failed to create form file: %v", err)
return err
}
part.Write(*data)
err = writer.Close()
if err != nil {
log.Warningf("Failed to clise writer: %v", err)
return err
}
req, err := http.NewRequest("POST", root_url+"/api/job/archive", body)
if err != nil {
log.Warningf("Failed to create request: %v", err)
return err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Authorization", "token "+job_token)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
response, err := client.Do(req)
if err != nil {
log.Warningf("Failed to execute request: %v", err)
return err
}
bodyText, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Warningf("Failed to read response body: %v", err)
return err
}
if response.StatusCode != 200 {
return goerrors.New(string(bodyText))
}
return nil
}
type CollectedPod struct {
NamespaceID string `json:"namespace_id"`
PodID string `json:"pod_id"`
Pod string `json:"pod_name"`
Containers []string `json:"containers"`
Namespace string `json:"namespace_name"`
}
func retrieveLogs(cr *v1alpha1.GKECluster, cluster *RemoteCluster, log *logrus.Entry, logPath string, done chan error) {
log.Infof("Collecting data from GKE cluster %s", cluster.Name)
defer close(done)
annotations := cr.GetAnnotations()
_, ok := annotations["infrabox.net/root-url"]
if !ok {
log.Warning("infrabox.net/root-url not set, not retrieving logs")
return
}
_, ok = annotations["infrabox.net/job-id"]
if !ok {
log.Warning("infrabox.net/job-id not set, not retrieving logs")
return
}
_, ok = annotations["infrabox.net/job-token"]
if !ok {
log.Warning("infrabox.net/job-token not set, not retrieving logs")
return
}
var pods []CollectedPod
data, err := doCollectorRequest(cluster, log, "/api/pods")
if err != nil {
log.Errorf("Failed to get collected pod list: %v", err)
return
}
err = json.Unmarshal(*data, &pods)
if err != nil {
log.Errorf("Failed to collected pod list: %v", err)
return
}
for _, pod := range pods {
for _, container := range pod.Containers {
log.Debug("Collecting logs for pod: ", pod.PodID)
data, err := doCollectorRequest(cluster, log, "/api/pods/"+pod.PodID+"/log/"+container)
if err != nil {
log.Warningf("Failed to get collected pod logs: %v", err)
continue
}
filename := "pod_" + pod.Namespace + "_" + pod.Pod + "_" + container + ".txt"
filename = path.Join(logPath, filename)
if err := ioutil.WriteFile(filename, *data, os.ModePerm); err != nil {
log.Debugf("Failed to write pod logs: %v", err)
continue
}
}
}
archivePath := path.Join(logPath, "pods_log.zip")
err = archiver.Archive([]string{logPath}, archivePath)
if err != nil {
log.Debugf("Failed to archive log: %v", err)
return
}
archiveData, err := ioutil.ReadFile(archivePath)
if err != nil {
log.Debugf("Failed to archive log: %v", err)
return
}
err = uploadToArchive(cr, log, &archiveData, archivePath)
if err != nil {
log.Warningf("Failed to upload log to archive: %v", err)
}
}
func injectCollector(cluster *RemoteCluster, log *logrus.Entry) error {
client, err := newRemoteClusterSDK(cluster)
if err != nil {
log.Errorf("Failed to create remote cluster client: %v", err)
return err
}
err = client.Create(newCollectorNamespace(), log)
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create collector deployment: %v", err)
return err
}
err = client.Create(newCollectorCRB(), log)
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create collector crb: %v", err)
return err
}
err = client.Create(newCollectorDeployment(), log)
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create collector deployment: %v", err)
return err
}
err = client.Create(newCollectorService(), log)
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create collector service: %v", err)
return err
}
err = client.Create(newFluentbitConfigMap(), log)
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create collector fluentbit config map: %v", err)
return err
}
err = client.Create(newCollectorDaemonSet(), log)
if err != nil && !errors.IsAlreadyExists(err) {
log.Errorf("Failed to create collector daemon set: %v", err)
return err
}
return nil
}
type RemoteClusterSDK struct {
kubeConfig *rest.Config
cluster *RemoteCluster
clientPool dynamic.ClientPool
restMapper *discovery.DeferredDiscoveryRESTMapper
}
func (r *RemoteClusterSDK) Create(object types.Object, log *logrus.Entry) (err error) {
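// Creates an arbitrary runtime object in the remote cluster: the object is converted to an
// unstructured representation and created through a dynamic client resolved from its GroupVersionKind.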
_, namespace, err := k8sutil.GetNameAndNamespace(object)
if err != nil {
log.Errorf("Failed to get namespace: %v", err)
return err
}
gvk := object.GetObjectKind().GroupVersionKind()
apiVersion, kind := gvk.ToAPIVersionAndKind()
resourceClient, _, err := r.getRemoteResourceClient(apiVersion, kind, namespace)
if err != nil {
return fmt.Errorf("failed to get resource client: %v", err)
}
unstructObj := k8sutil.UnstructuredFromRuntimeObject(object)
unstructObj, err = resourceClient.Create(unstructObj)
if err != nil {
log.Errorf("Failed to create object: %v", err)
return err
}
// Update the arg object with the result
err = k8sutil.UnstructuredIntoRuntimeObject(unstructObj, object)
if err != nil {
return fmt.Errorf("failed to unmarshal the retrieved data: %v", err)
}
return nil
}
func newRemoteClusterSDK(cluster *RemoteCluster) (*RemoteClusterSDK, error) {
kubeConfigPath := "/tmp/kubeconfig-" + cluster.Name
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
if err != nil {
return nil, err
}
if len(cluster.MasterAuth.Token) > 0 {
kubeConfig.BearerToken = cluster.MasterAuth.Token
}
kubeClient := kubernetes.NewForConfigOrDie(kubeConfig)
cachedDiscoveryClient := cached.NewMemCacheClient(kubeClient.Discovery())
restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient, meta.InterfacesForUnstructured)
restMapper.Reset()
kubeConfig.ContentConfig = dynamic.ContentConfig()
clientPool := dynamic.NewClientPool(kubeConfig, restMapper, dynamic.LegacyAPIPathResolverFunc)
return &RemoteClusterSDK{
kubeConfig: kubeConfig,
clientPool: clientPool,
cluster: cluster,
restMapper: restMapper,
}, nil
}
func apiResource(gvk schema.GroupVersionKind, restMapper *discovery.DeferredDiscoveryRESTMapper) (*metav1.APIResource, error) {
mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, fmt.Errorf("failed to get the resource REST mapping for GroupVersionKind(%s): %v", gvk.String(), err)
}
resource := &metav1.APIResource{
Name: mapping.Resource,
Namespaced: mapping.Scope == meta.RESTScopeNamespace,
Kind: gvk.Kind,
}
return resource, nil
}
func (r *RemoteClusterSDK) getRemoteResourceClient(apiVersion, kind, namespace string) (dynamic.ResourceInterface, string, error) {
gv, err := schema.ParseGroupVersion(apiVersion)
if err != nil {
return nil, "", fmt.Errorf("failed to parse apiVersion: %v", err)
}
gvk := schema.GroupVersionKind{
Group: gv.Group,
Version: gv.Version,
Kind: kind,
}
client, err := r.clientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return nil, "", fmt.Errorf("failed to get client for GroupVersionKind(%s): %v", gvk.String(), err)
}
resource, err := apiResource(gvk, r.restMapper)
if err != nil {
return nil, "", fmt.Errorf("failed to get resource type: %v", err)
}
pluralName := resource.Name
resourceClient := client.Resource(resource, namespace)
return resourceClient, pluralName, nil
}
func newCollectorNamespace() *v1.Namespace {
return &v1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "infrabox-collector",
},
}
}
func newCollectorCRB() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterRoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "infrabox-collector-crb",
Namespace: "infrabox-collector",
},
Subjects: []rbacv1.Subject{{
Kind: "ServiceAccount",
Name: "default",
Namespace: "infrabox-collector",
}},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: "cluster-admin",
APIGroup: "rbac.authorization.k8s.io",
},
}
}
func newCollectorService() *v1.Service {
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "infrabox-collector-api",
Namespace: "infrabox-collector",
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Name: "http",
Port: 80,
TargetPort: intstr.FromInt(8080),
}},
Selector: map[string]string{
"app": "api.collector.infrabox.net",
},
},
}
}
func newCollectorDeployment() *appsv1.Deployment {
var replicas int32 = 1
collectorImage := os.Getenv("COLLECTOR_IMAGE")
if collectorImage == "" {
collectorImage = "quay.io/infrabox/collector-api"
}
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "infrabox-collector-api",
Namespace: "infrabox-collector",
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "api.collector.infrabox.net",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "api.collector.infrabox.net",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "api",
Image: collectorImage,
}},
},
},
},
}
}
func newFluentbitConfigMap() *v1.ConfigMap {
return &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "infrabox-fluent-bit",
Namespace: "infrabox-collector",
},
Data: map[string]string{
"parsers.conf": `
[PARSER]
Name docker_utf8
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
Decode_Field_as escaped_utf8 log do_next
Decode_Field_as escaped log
`,
"fluent-bit.conf": `
[SERVICE]
Flush 2
Daemon Off
Log_Level info
Parsers_File parsers.conf
[INPUT]
Name tail
Path /var/log/containers/*.log
Parser docker_utf8
Tag kube.*
Refresh_Interval 2
Mem_Buf_Limit 50MB
Skip_Long_Lines On
[FILTER]
Name kubernetes
Match kube.*
Kube_URL https://kubernetes.default.svc.cluster.local:443
Kube_CA_File /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
Kube_Token_File /var/run/secrets/kubernetes.io/serviceaccount/token
[OUTPUT]
Name http
Match *
Host infrabox-collector-api.infrabox-collector
Port 80
URI /api/log
Format json
`,
},
}
}
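// newCollectorDaemonSet runs fluent-bit on every node to tail container logs and forward them to the collector API service.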
func newCollectorDaemonSet() *appsv1.DaemonSet {
return &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "infrabox-collector-fluent-bit",
Namespace: "infrabox-collector",
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "fluentbit.collector.infrabox.net",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "fluentbit.collector.infrabox.net",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "fluent-bit",
Image: "fluent/fluent-bit:0.13",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
"memory": resource.MustParse("100Mi"),
},
Requests: v1.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("100Mi"),
},
},
VolumeMounts: []v1.VolumeMount{{
Name: "varlog",
MountPath: "/var/log",
}, {
Name: "varlibdockercontainers",
MountPath: "/var/lib/docker/containers",
ReadOnly: true,
}, {
Name: "config",
MountPath: "/fluent-bit/etc/parsers.conf",
SubPath: "parsers.conf",
}, {
Name: "config",
MountPath: "/fluent-bit/etc/fluent-bit.conf",
SubPath: "fluent-bit.conf",
}},
}},
Volumes: []v1.Volume{{
Name: "varlibdockercontainers",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/docker/containers",
},
},
}, {
Name: "varlog",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/log",
},
},
}, {
Name: "config",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "infrabox-fluent-bit",
},
},
},
}},
},
},
},
}
}
|
[
"\"MAX_NUM_CLUSTERS\"",
"\"COLLECTOR_IMAGE\""
] |
[] |
[
"COLLECTOR_IMAGE",
"MAX_NUM_CLUSTERS"
] |
[]
|
["COLLECTOR_IMAGE", "MAX_NUM_CLUSTERS"]
|
go
| 2 | 0 | |
examples/myserver-server.py
|
#!/bin/env python
import os
import psutil
import socket
import ssl
from SocketServer import ThreadingMixIn
from jrpc import SimpleTCPServer
from jrpc import SimpleTCPRequestHandler
from jrpc.functions import execute
from jrpc.functions import parseProcesses
from jrpc.functions import parseNetstat
from jrpc.functions import parseIfcfg
from jrpc.functions import parseIO
from jrpc.Logger import Logger
from jrpc.utils import deamonize
__author__ = 'umairghani'
LOG = Logger("/var/log/myserver", "myserver").get_logger()
class MyServerRequestHandler(SimpleTCPRequestHandler):
"""
Override Logging
"""
def log_request(self, code="-", size="-"):
message = "%s - %s %s %s" % \
(self.client_address[0],
self.requestline, str(code), str(size))
if code == 200:
LOG.info(message)
else:
LOG.error(message)
def log_error(self, format, *args):
message = "%s - %s" % \
(self.client_address[0],
format%args)
LOG.error(message)
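# Threaded JSON-RPC server exposing the system-inspection functions registered in __init__.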
class MyServer(ThreadingMixIn,SimpleTCPServer):
def __init__(self, bindaddr):
SimpleTCPServer.__init__(self, bindaddr,
MyServerRequestHandler)
self.register_function(ping)
self.register_function(mount)
self.register_function(iostat)
self.register_function(uptime)
self.register_function(get_env)
self.register_function(netstat)
self.register_function(cpuinfo)
self.register_function(meminfo)
self.register_function(ifconfig)
self.register_function(disk_usage)
self.register_function(check_process)
self.register_function(running_process)
def ping():
"""
:return: response to ping
"""
return "pong"
def uptime():
"""
:return uptime of the system
:return {"output": output, "error": error}
"""
LOG.info("Checking uptime")
output = ""
try:
output, error = execute("uptime")
except Exception, e:
LOG.exception(e)
error = str(e)
return {"output": output, "error": error}
def mount():
"""
:return mount information of the system
:return {"output": <array>, "error": error}
"""
LOG.info("Getting mount")
output = []
try:
mounts = psutil.disk_partitions()
for mount in mounts:
output.append({
"device": mount.device,
"mountpoint": mount.mountpoint,
"fstype": mount.fstype,
"opts": mount.opts
})
return {"output": output, "error": ""}
except Exception, e:
LOG.exception(e)
return {"output": output, "error": str(e)}
def iostat():
"""
:return iostat information of the system
:return {"output": <array>, "error": error}
"""
LOG.info("Getting iostat")
try:
iostat = psutil.disk_io_counters(perdisk=True)
output = parseIO(iostat)
return {"output": output, "error": ""}
except Exception, e:
LOG.exception(e)
return {"output": [], "error": str(e)}
def ifconfig():
"""
:return ifconfig information of the system
:return {"output": <array>, "error": error}
"""
LOG.info("Getting ifconfig info")
try:
ifcfg = psutil.network_io_counters(pernic=True)
output = parseIfcfg(ifcfg)
return {"output": output, "error": ""}
except Exception, e:
LOG.exception(e)
return {"output": [], "error": str(e)}
def cpuinfo():
"""
:return cpu information of the system
:return {"output": <dict>, "error": error}
"""
LOG.info("Getting cpu info")
try:
percent_per_cpu = psutil.cpu_percent(percpu=True)
avg_cpu_percent = psutil.cpu_percent()
no_of_cpu = psutil.NUM_CPUS
cpu_time = psutil.cpu_times()
output = {
"no_of_cpu": no_of_cpu,
"cpu_percent": avg_cpu_percent,
"percent_per_cpu": percent_per_cpu,
"cputimes": {
"user": cpu_time.user,
"nice": cpu_time.nice,
"system": cpu_time.system,
"idle": cpu_time.idle,
"iowait": cpu_time.iowait,
"irq": cpu_time.irq,
"softirq": cpu_time.softirq
}
}
return {"output": output, "error": ""}
except Exception, e:
LOG.exception(e)
return {"output": {}, "error": str(e)}
def meminfo():
"""
:return memory information of the system
:return {"output": <dict>, "error": error}
"""
LOG.info("Getting memory info")
try:
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
output = {
"memory": {
"total": ram.total,
"available": ram.available,
"percent": ram.percent,
"used": ram.used,
"free": ram.free,
"active": ram.active,
"inactive": ram.inactive,
"buffers": ram.buffers,
"cached": ram.cached
},
"swap": {
"total": swap.total,
"used": swap.used,
"free": swap.free,
"percent": swap.percent,
"sin": swap.sin,
"sout": swap.sout
}
}
return {"output": output, "error": ""}
except Exception, e:
LOG.exception(e)
return {"output": {}, "error": str(e)}
def disk_usage():
"""
:return disk usage of the system
:return {"output": <array>, "error": error}
"""
LOG.info("Getting disk usage of the system")
output = []
try:
disks = psutil.disk_partitions()
for disk in disks:
mountpoint = disk.mountpoint
usage = psutil.disk_usage(mountpoint)
output.append({
mountpoint: {
"total": usage.total,
"used": usage.used,
"free": usage.free,
"percent": usage.percent,
},
})
return {"output": output, "error": ""}
except Exception ,e:
LOG.exception(e)
return {"output": output, "error": str(e)}
def netstat():
"""
:return netstat output of the system
:return {"output": <array>, "error": error}
"""
LOG.info("Getting netstat/ss output")
## TO DO
#cmd = "netstat -antp"
try:
cmd = "ss -t -u -a"
out, err = execute(cmd)
if out:
stats = out.strip().split("\n")
output = parseNetstat(stats)
return {"output": output, "error": err}
else:
LOG.error("[%s] command failed" %cmd)
return {"output":[], "error": err}
except Exception, e:
return {"output": [], "error": str(e)}
def running_process():
"""
Get all the process running on the system
:return: array of process
:return {"output": <array>, "error": error}
"""
LOG.info("Getting all the process running")
try:
cmd = "ps -eo args"
output, error = execute(cmd)
plist = output.strip().split("\n")
return {"output": plist, "error": error}
except Exception, e:
return {"output": [], "error": str(e)}
def check_process(process):
"""
:param process
:return usage of the process on the system
:return {"output": <array>, "error": error}
"""
LOG.info("Getting [%s] process information from ps" % process)
try:
cmd = "ps -eo user,pid,pcpu,pmem,lstart,args | grep -i %s | grep -v grep" % \
process
out, err = execute(cmd)
if out:
plist = out.strip().split("\n")
output = parseProcesses(plist)
return {"output": output, "error": err}
else:
LOG.error("[%s] process not found" %process)
return {"output":[], "error": err}
except Exception, e:
LOG.exception(e)
return {"output": [], "error": str(e)}
def get_env():
"""
    Return the system's environment variables
    :return string representation of the environment
    :return {"output": <str>, "error": error}
"""
LOG.info("Getting environment variables")
return {"output": str(os.environ), "error": ""}
def main():
"""
Main
"""
deamonize("myserver-server")
HOST, PORT = "localhost", 9999
LOG.info("** Starting myserver server on https://%s:%d **" % (HOST, PORT))
server = MyServer((HOST, PORT))
# For SSL - uncomment the following line
#server.socket = ssl.wrap_socket(server.socket, keyfile='<path_to_keyfile>', certfile='<path_to_cert>', server_side=True)
server.serve_forever()
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Python-3.5.5/Lib/_osx_support.py
|
"""Shared OS X support functions."""
import os
import re
import sys
__all__ = [
'compiler_fixup',
'customize_config_vars',
'customize_compiler',
'get_platform_osx',
]
# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS')
# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
# prefix added to original configuration variable names
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
"""Find a build tool on current path or using xcrun"""
return (_find_executable(toolname)
or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
or ''
)
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except OSError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
"""Remove original unmodified values for testing"""
# This is needed for higher-level cross-platform tests of get_platform.
for k in list(_config_vars):
if k.startswith(_INITPRE):
del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
"""Save modified and original unmodified value of configuration var"""
oldvalue = _config_vars.get(cv, '')
if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
_config_vars[_INITPRE + cv] = oldvalue
_config_vars[cv] = newvalue
def _supports_universal_builds():
"""Returns True if universal builds are supported on this system"""
# As an approximation, we assume that if we are running on 10.4 or above,
# then we are running with an Xcode environment that supports universal
# builds, in particular -isysroot and -arch arguments to the compiler. This
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
osx_version = _get_system_version()
if osx_version:
try:
osx_version = tuple(int(i) for i in osx_version.split('.'))
except ValueError:
osx_version = ''
return bool(osx_version >= (10, 4)) if osx_version else False
def _find_appropriate_compiler(_config_vars):
"""Find appropriate C compiler for extension module builds"""
# Issue #13590:
# The OSX location for the compiler varies between OSX
# (or rather Xcode) releases. With older releases (up-to 10.5)
# the compiler is in /usr/bin, with newer releases the compiler
# can only be found inside Xcode.app if the "Command Line Tools"
# are not installed.
#
# Furthermore, the compiler that can be used varies between
# Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
# as the compiler, after that 'clang' should be used because
# gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
# miscompiles Python.
# skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
# The CC config var might contain additional arguments.
# Ignore them while searching.
cc = oldcc = _config_vars['CC'].split()[0]
if not _find_executable(cc):
# Compiler is not found on the shell search PATH.
# Now search for clang, first on PATH (if the Command LIne
# Tools have been installed in / or if the user has provided
# another location via CC). If not found, try using xcrun
# to find an uninstalled clang (within a selected Xcode).
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself (and os.popen is
# implemented on top of subprocess and is therefore not
# usable as well)
cc = _find_build_tool('clang')
elif os.path.basename(cc).startswith('gcc'):
# Compiler is GCC, check if it is LLVM-GCC
data = _read_output("'%s' --version"
% (cc.replace("'", "'\"'\"'"),))
if data and 'llvm-gcc' in data:
# Found LLVM-GCC, fall back to clang
cc = _find_build_tool('clang')
if not cc:
raise SystemError(
"Cannot locate working compiler")
if cc != oldcc:
# Found a replacement compiler.
# Modify config vars using new compiler, if not already explicitly
# overridden by an env variable, preserving additional arguments.
for cv in _COMPILER_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
cv_split = _config_vars[cv].split()
cv_split[0] = cc if cv != 'CXX' else cc + '++'
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
return _config_vars
def _remove_universal_flags(_config_vars):
"""Remove all universal build arguments from config vars"""
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _remove_unsupported_archs(_config_vars):
"""Remove any unsupported archs from config vars"""
# Different Xcode releases support different sets for '-arch'
# flags. In particular, Xcode 4.x no longer supports the
# PPC architectures.
#
# This code automatically removes '-arch ppc' and '-arch ppc64'
# when these are not supported. That makes it possible to
# build extensions on OSX 10.7 and later with the prebuilt
# 32-bit installer on the python.org website.
# skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None:
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself
status = os.system(
"""echo 'int main{};' | """
"""'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
%(_config_vars['CC'].replace("'", "'\"'\"'"),))
if status:
# The compile failed for some reason. Because of differences
# across Xcode and compiler versions, there is no reliable way
# to be sure why it failed. Assume here it was due to lack of
# PPC support and remove the related '-arch' flags from each
# config variables not explicitly overridden by an environment
# variable. If the error was for some other reason, we hope the
# failure will show up again when trying to compile an extension
# module.
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub('-arch\s+ppc\w*\s', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _override_all_archs(_config_vars):
"""Allow override of all archs with ARCHFLAGS env var"""
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and '-arch' in _config_vars[cv]:
flags = _config_vars[cv]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _check_for_unavailable_sdk(_config_vars):
"""Remove references to any SDKs not available"""
# If we're on OSX 10.5 or later and the user tries to
# compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail. This is particularly important with
# the standalone Command Line Tools alternative to a
# full-blown Xcode install since the CLT packages do not
# provide SDKs. If the SDK is not present, it is assumed
# that the header files and dev libs have been installed
# to /usr and /System/Library by either a standalone CLT
# package or the CLT component within Xcode.
cflags = _config_vars.get('CFLAGS', '')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def compiler_fixup(compiler_so, cc_args):
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
compile flags if the user has specified one them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
"""
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = '-isysroot' in cc_args
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
while True:
try:
index = compiler_so.index('-isysroot')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
if '-isysroot' in cc_args:
idx = cc_args.index('-isysroot')
sysroot = cc_args[idx+1]
elif '-isysroot' in compiler_so:
idx = compiler_so.index('-isysroot')
sysroot = compiler_so[idx+1]
if sysroot and not os.path.isdir(sysroot):
from distutils import log
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
def customize_config_vars(_config_vars):
"""Customize Python build configuration variables.
Called internally from sysconfig with a mutable mapping
containing name/value pairs parsed from the configured
makefile used to build this interpreter. Returns
the mapping updated as needed to reflect the environment
in which the interpreter is running; in the case of
a Python from a binary installer, the installed
environment may be very different from the build
environment, i.e. different OS levels, different
built tools, different available CPU architectures.
This customization is performed whenever
distutils.sysconfig.get_config_vars() is first
called. It may be used in environments where no
compilers are present, i.e. when installing pure
Python dists. Customization of compiler paths
and detection of unavailable archs is deferred
until the first extension module build is
requested (in distutils.sysconfig.customize_compiler).
Currently called from distutils.sysconfig
"""
if not _supports_universal_builds():
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
_remove_universal_flags(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
# Remove references to sdks that are not found
_check_for_unavailable_sdk(_config_vars)
return _config_vars
def customize_compiler(_config_vars):
"""Customize compiler path and configuration variables.
This customization is performed when the first
extension module build is requested
in distutils.sysconfig.customize_compiler).
"""
# Find a compiler to use for extension module builds
_find_appropriate_compiler(_config_vars)
# Remove ppc arch flags if not supported here
_remove_unsupported_archs(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
"""Filter values for get_platform()"""
# called from get_platform() in sysconfig and distutils.util
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
macrelease = _get_system_version() or macver
macver = macver or macrelease
if macver:
release = macver
osname = "macosx"
# Use the original CFLAGS value, if available, so that we
# return the same machine type for the platform string.
# Otherwise, distutils may consider this a cross-compiling
# case and disallow installs.
cflags = _config_vars.get(_INITPRE+'CFLAGS',
_config_vars.get('CFLAGS', ''))
if macrelease:
try:
macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
except ValueError:
macrelease = (10, 0)
else:
# assume no universal support
macrelease = (10, 0)
if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
machine = 'fat'
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return (osname, release, machine)
|
[] |
[] |
[
"PATH",
"ARCHFLAGS"
] |
[]
|
["PATH", "ARCHFLAGS"]
|
python
| 2 | 0 | |
policies/conftest.py
|
import logging
import os
import uuid
import pandas as pd
import pytest
import yaml
# from output import build_tag
from pybatfish.client.session import Session
from pybatfish.datamodel import AddressGroup, ReferenceBook
BF_NETWORK = os.environ["BF_NETWORK"]
BF_SNAPSHOT = os.environ["BF_SNAPSHOT"]
BF_INIT_SNAPSHOT = os.environ.get("BF_INIT_SNAPSHOT", "yes")
BF_SNAPSHOT_DIR = '{}/'.format(os.environ.get("BF_SNAPSHOT_DIR", "."))
BF_DASHBOARD = os.environ.get("BF_DASHBOARD", "http://localhost:3000/dashboard")
NETWORK_FIXTURES = ['demonet']
ADDRESS_GROUPS_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "address-groups.yml")
####################
# Set pandas options
####################
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
#######################
# Set pybatfish options
#######################
logging.getLogger('pybatfish').setLevel(logging.WARN)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RESET = '\033[39;49m'
def pytest_addoption(parser):
parser.addoption("--min-severity", action="store", default=0, type=int,
help="Minimal FindIssues severity to care about")
@pytest.fixture(scope="session")
def bf():
try:
bf = Session.get('bfe')
os.environ["SESSION_TYPE"] = 'bfe'
except:
bf = Session.get('bf')
os.environ["SESSION_TYPE"] = 'bf'
session_type = os.environ.get('SESSION_TYPE')
bf.enable_diagnostics = False
bf.set_network(BF_NETWORK)
if BF_INIT_SNAPSHOT == "yes":
bf.init_snapshot(BF_SNAPSHOT_DIR, name=BF_SNAPSHOT, overwrite=True)
else:
bf.set_snapshot(BF_SNAPSHOT)
if session_type == 'bfe':
bf.get_node_roles()
add_address_groups(bf)
return bf
@pytest.fixture
def min_severity(request):
return request.config.getoption("--min-severity")
def pytest_report_header(config):
return [
bcolors.BOLD + bcolors.OKBLUE + "Running Intentionet CI tests" + bcolors.RESET]
def pytest_terminal_summary(terminalreporter, exitstatus, config):
if exitstatus != 0 and BF_DASHBOARD is not None:
url = "{BF_DASHBOARD}/{BF_NETWORK}/{BF_SNAPSHOT}/policies".format(
BF_DASHBOARD=BF_DASHBOARD, BF_NETWORK=BF_NETWORK, BF_SNAPSHOT=BF_SNAPSHOT)
terminalreporter.write_line(
"\n\n"
+ bcolors.BOLD + bcolors.FAIL
+ "There have been failures, explore more using Intentionet Dashboard at {}".format(
url)
+ " " # saves URL
)
def pytest_sessionstart(session):
os.environ['bf_policy_name'] = session.name
p_id = uuid.uuid4().hex
def pytest_runtest_setup(item):
# Get test file name
test_file_name = os.path.basename(item.parent.name)
test_name = item.name
os.environ['bf_policy_name'] = test_file_name
os.environ['bf_policy_id'] = p_id
os.environ['bf_test_name'] = test_name
def subdict(d, keys):
return {k: d.get(k) for k in keys}
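# Reads address-groups.yml and publishes its groups to Batfish as the "metadata" reference book.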
def add_address_groups(bf):
with open(ADDRESS_GROUPS_FILE, "r") as f:
groups = yaml.safe_load(f)
address_groups = [AddressGroup(g["name"], g["addresses"]) for g in groups["metadata"]]
bf.put_reference_book(ReferenceBook(name="metadata", addressGroups=address_groups))
|
[] |
[] |
[
"BF_SNAPSHOT_DIR",
"bf_policy_name",
"SESSION_TYPE",
"BF_SNAPSHOT",
"bf_test_name",
"BF_NETWORK",
"bf_policy_id",
"BF_INIT_SNAPSHOT",
"BF_DASHBOARD"
] |
[]
|
["BF_SNAPSHOT_DIR", "bf_policy_name", "SESSION_TYPE", "BF_SNAPSHOT", "bf_test_name", "BF_NETWORK", "bf_policy_id", "BF_INIT_SNAPSHOT", "BF_DASHBOARD"]
|
python
| 9 | 0 | |
superpoint/datasets_V2/synthetic_shapes.py
|
import numpy as np
import tensorflow as tf
import cv2
import os
import tarfile
from pathlib import Path
from tqdm import tqdm
import shutil
from .base_dataset import BaseDataset
from superpoint.datasets_V2 import synthetic_dataset
from .utils import pipeline
from .utils.pipeline import parse_primitives
from superpoint.settings import DATA_PATH
def add_dummy_valid_mask(data):
with tf.name_scope('dummy_valid_mask'):
valid_mask = tf.ones(tf.shape(input=data['image'])[:2], dtype=tf.int32)
return {**data, 'valid_mask': valid_mask}
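# Dataset of procedurally drawn geometric shapes with ground-truth keypoints, either generated on the fly or dumped to / read from tar archives under DATA_PATH.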
class SyntheticShapes(BaseDataset):
default_config = {
'primitives': 'all',
'truncate': {},
'validation_size': -1,
'test_size': -1,
'on-the-fly': False,
'cache_in_memory': False,
'suffix': None,
'add_augmentation_to_test_set': False,
'num_parallel_calls': 10,
'generation': {
'split_sizes': {'training': 10000, 'validation': 200, 'test': 500},
'image_size': [960, 1280],
'random_seed': 0,
'params': {
'generate_background': {
'min_kernel_size': 150, 'max_kernel_size': 500,
'min_rad_ratio': 0.02, 'max_rad_ratio': 0.031},
'draw_stripes': {'transform_params': (0.1, 0.1)},
'draw_multiple_polygons': {'kernel_boundaries': (50, 100)}
},
},
'preprocessing': {
'resize': [240, 320],
'blur_size': 11,
},
'augmentation': {
'photometric': {
'enable': False,
'primitives': 'all',
'params': {},
'random_order': True,
},
'homographic': {
'enable': False,
'params': {},
'valid_border_margin': 0,
},
}
}
drawing_primitives = [
'draw_lines',
'draw_polygon',
'draw_multiple_polygons',
'draw_ellipses',
'draw_star',
'draw_checkerboard',
'draw_stripes',
'draw_cube',
'gaussian_noise'
]
drawing_primitives = [ # TODO remove reduction
'draw_lines'
]
def dump_primitive_data(self, primitive, tar_path, config):
temp_dir = Path(os.environ['TMPDIR'], primitive)
tf.compat.v1.logging.info('Generating tarfile for primitive {}.'.format(primitive))
synthetic_dataset.set_random_state(np.random.RandomState(
config['generation']['random_seed']))
for split, size in self.config['generation']['split_sizes'].items():
im_dir, pts_dir = [Path(temp_dir, i, split) for i in ['images', 'points']]
im_dir.mkdir(parents=True, exist_ok=True)
pts_dir.mkdir(parents=True, exist_ok=True)
for i in tqdm(range(size), desc=split, leave=False):
image = synthetic_dataset.generate_background(
config['generation']['image_size'],
**config['generation']['params']['generate_background'])
points = np.array(getattr(synthetic_dataset, primitive)(
image, **config['generation']['params'].get(primitive, {})))
points = np.flip(points, 1) # reverse convention with opencv
b = config['preprocessing']['blur_size']
image = cv2.GaussianBlur(image, (b, b), 0)
points = (points * np.array(config['preprocessing']['resize'], np.float)
/ np.array(config['generation']['image_size'], np.float))
image = cv2.resize(image, tuple(config['preprocessing']['resize'][::-1]),
interpolation=cv2.INTER_LINEAR)
cv2.imwrite(str(Path(im_dir, '{}.png'.format(i))), image)
np.save(Path(pts_dir, '{}.npy'.format(i)), points)
# Pack into a tar file
tar = tarfile.open(tar_path, mode='w:gz')
tar.add(temp_dir, arcname=primitive)
tar.close()
shutil.rmtree(temp_dir)
tf.compat.v1.logging.info('Tarfile dumped to {}.'.format(tar_path))
def _init_dataset(self, **config):
# Parse drawing primitives
primitives = parse_primitives(config['primitives'], self.drawing_primitives)
tf.data.Dataset.map_parallel = lambda self, fn: self.map(
fn, num_parallel_calls=config['num_parallel_calls'])
if config['on-the-fly']:
return None
basepath = Path(
DATA_PATH, 'synthetic_shapes' +
('_{}'.format(config['suffix']) if config['suffix'] is not None else ''))
basepath.mkdir(parents=True, exist_ok=True)
splits = {s: {'images': [], 'points': []}
for s in ['training', 'validation', 'test']}
for primitive in primitives:
tar_path = Path(basepath, '{}.tar.gz'.format(primitive))
if not tar_path.exists():
self.dump_primitive_data(primitive, tar_path, config)
# Untar locally
tf.compat.v1.logging.info('Extracting archive for primitive {}.'.format(primitive))
tar = tarfile.open(tar_path)
# temp_dir = Path(os.environ['TMPDIR'])
temp_dir = "/tmp/"
tar.extractall(path=temp_dir)
tar.close()
# Gather filenames in all splits, optionally truncate
truncate = config['truncate'].get(primitive, 1)
path = Path(temp_dir, primitive)
for s in splits:
e = [str(p) for p in Path(path, 'images', s).iterdir()]
f = [p.replace('images', 'points') for p in e]
f = [p.replace('.png', '.npy') for p in f]
splits[s]['images'].extend(e[:int(truncate*len(e))])
splits[s]['points'].extend(f[:int(truncate*len(f))])
# Shuffle
for s in splits:
perm = np.random.RandomState(0).permutation(len(splits[s]['images']))
for obj in ['images', 'points']:
splits[s][obj] = np.array(splits[s][obj])[perm].tolist()
# dataset = tf.data.Dataset.from_tensor_slices((images, labels))
return splits
def _get_data(self, filenames, split_name, **config):
def _gen_shape():
primitives = parse_primitives(config['primitives'], self.drawing_primitives)
while True:
primitive = np.random.choice(primitives)
image = synthetic_dataset.generate_background(
config['generation']['image_size'],
**config['generation']['params']['generate_background'])
points = np.array(getattr(synthetic_dataset, primitive)(
image, **config['generation']['params'].get(primitive, {})))
yield (np.expand_dims(image, axis=-1).astype(np.float32),
np.flip(points.astype(np.float32), 1))
def _read_image(filename):
image = tf.io.read_file(filename)
image = tf.image.decode_png(image, channels=1)
return tf.cast(image, tf.float32)
# Python function
def _read_points(filename):
points = np.load(filename.decode('utf-8')).astype(np.float32)
# force a shape of points so it can be batched
            if points.shape[0] <= 20:
                paddings = [[0, 20 - points.shape[0]], [0, 0]]
                # pad with NaN rows up to a constant number of keypoints so examples can be batched
                points = np.pad(points, paddings, mode='constant', constant_values=np.NAN)
else:
points = points[:20]
return points
if config['on-the-fly']:
data = tf.data.Dataset.from_generator(
_gen_shape, (tf.float32, tf.float32),
(tf.TensorShape(config['generation']['image_size']+[1]),
tf.TensorShape([None, 2])))
data = data.map(lambda i, c: pipeline.downsample(
i, c, **config['preprocessing']))
else:
# Initialize dataset with file names
data = tf.data.Dataset.from_tensor_slices(
(filenames[split_name]['images'], filenames[split_name]['points']))
# Read image and point coordinates
# tf.py_func is deprecated in TF V2.
data = data.map(
lambda image, points:
(_read_image(image), tf.numpy_function(_read_points, [points], tf.float32)))
data = data.map(lambda image, points: (image, tf.reshape(points, [-1, 2])))
if split_name == 'validation':
data = data.take(config['validation_size'])
elif split_name == 'test':
data = data.take(config['test_size'])
data = data.map(lambda image, kp: {'image': image, 'keypoints': kp})
data = data.map(pipeline.add_dummy_valid_mask)
#data = data.map(add_dummy_valid_mask)
# if config['cache_in_memory'] and not config['on-the-fly']:
# tf.compat.v1.logging.info('Caching data, fist access will take some time.')
# data = data.cache()
# Apply augmentation
# if split_name == 'training' or config['add_augmentation_to_test_set']:
# if config['augmentation']['photometric']['enable']:
# data = data.map_parallel(lambda d: pipeline.photometric_augmentation(
# d, **config['augmentation']['photometric']))
# if config['augmentation']['homographic']['enable']:
# data = data.map_parallel(lambda d: pipeline.homographic_augmentation(
# d, **config['augmentation']['homographic']))
# Convert the point coordinates to a dense keypoint map
data = data.map_parallel(pipeline.add_keypoint_map)
data = data.map_parallel(lambda d: {**d, 'image': tf.cast(d['image'], dtype=tf.float32) / 255.})
return data
|
[] |
[] |
[
"TMPDIR"
] |
[]
|
["TMPDIR"]
|
python
| 1 | 0 | |
vendor/github.com/elastic/beats/metricbeat/module/docker/vendor/github.com/docker/docker/pkg/archive/example_changes.go
|
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
		var err error
		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
dev-tools/smoke_test_rc.py
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies its sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.8
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins try running
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
# For testing a release from sonatype try this:
#
# python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/
#
import argparse
import tempfile
import os
from os.path import basename, dirname, isdir, join
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from http.client import HTTPConnection
def find_official_plugins():
plugins_dir = join(dirname(dirname(__file__)), 'plugins')
plugins = []
for plugin in os.listdir(plugins_dir):
if isdir(join(plugins_dir, plugin)):
plugins.append(plugin)
return plugins
DEFAULT_PLUGINS = find_official_plugins()
try:
JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
def run(command, env_vars=None):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
def sha1(file):
with open(file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def read_fully(file):
with open(file, encoding='utf-8') as f:
return f.read()
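# Polls the node's HTTP endpoint (host taken from logs/http.ports) until it answers or the timeout expires.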
def wait_for_node_startup(es_dir, timeout=60, header={}):
print(' Waiting until node becomes available for at most %s seconds' % timeout)
for _ in range(timeout):
conn = None
try:
time.sleep(1)
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=1)
conn.request('GET', '/', headers=header)
res = conn.getresponse()
if res.status == 200:
return True
except IOError as e:
pass
#that is ok it might not be there yet
finally:
if conn:
conn.close()
return False
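# Downloads each artifact together with its .sha1 and .asc files, verifies checksum and GPG signature against the release key, then smoke-tests the archives.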
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
print('Downloading and verifying release %s from %s' % (version, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
print(' ' + '*' * 80)
# here we create a temp gpg home where we download the release key as the only key into
# when we verify the signature it will fail if the signed key is not in the keystore and that
# way we keep the executing host unmodified since we don't have to import the key into the default keystore
gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
os.makedirs(gpg_home_dir, 0o700)
run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
url = '%s/%s' % (base_url, file)
print(' Downloading %s' % (url))
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
print(' Downloading %s' % (sha1_url))
urllib.request.urlretrieve(sha1_url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
expected = read_fully(checksum_file)
actual = sha1(artifact_path)
if expected != actual :
raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
gpg_url = ''.join([url, '.asc'])
gpg_file = artifact_path + ".asc"
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
print(' ' + '*' * 80)
print()
smoke_test_release(version, downloaded_files, hash, plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
def smoke_test_release(release, files, hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
print(' Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
plugin_names[plugin] = True
if 'x-pack' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
print(" Install dummy shield user")
run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
else:
headers = {}
print(' Starting elasticsearch deamon from [%s]' % es_dir)
try:
run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true'
% (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
if not wait_for_node_startup(es_dir, header=headers):
print("elasticsearch logs:")
print('*' * 80)
logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
print(logs)
print('*' * 80)
raise RuntimeError('server didn\'t start up')
try: # we now get / and /_nodes to fetch basic infos like hashes etc and the installed plugins
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=20)
conn.request('GET', '/', headers=headers)
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers)
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'].strip(), False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.close()
finally:
pid_path = os.path.join(es_dir, 'es-smoke.pid')
if os.path.exists(pid_path): # try reading the pid and kill the node
pid = int(read_fully(pid_path))
os.kill(pid, signal.SIGKILL)
shutil.rmtree(tmp_dir)
print(' ' + '*' * 80)
print()
def parse_list(string):
return [x.strip() for x in string.split(',')]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch Version to smoke-tests', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
help='The hash of the unified release')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--fetch_url', '-u', dest='url', default=None,
help='Fetched from the specified URL')
parser.set_defaults(hash=None)
parser.set_defaults(plugins=[])
parser.set_defaults(version=None)
parser.set_defaults(url=None)
args = parser.parse_args()
plugins = args.plugins
version = args.version
hash = args.hash
url = args.url
files = [ x % {'version': version} for x in [
'elasticsearch-%(version)s.tar.gz',
'elasticsearch-%(version)s.zip',
'elasticsearch-%(version)s.deb',
'elasticsearch-%(version)s.rpm'
]]
verify_java_version('1.8')
if url:
download_url = url
else:
download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
|
[] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
python
| 1 | 0 | |
tools/test_dota_base_q.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
from utils import tools
from libs.label_name_dict.label_dict import LabelMap
from libs.utils.draw_box_in_img import DrawBox
from libs.utils.coordinate_convert import forward_convert, backward_convert
from libs.utils import nms_rotate
from libs.utils.rotate_polygon_nms import rotate_gpu_nms
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
def parse_args():
parser = argparse.ArgumentParser('Start testing.')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/dataset/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
parser.add_argument('--flip_img', '-f', default=False,
action='store_true')
parser.add_argument('--num_imgs', dest='num_imgs',
help='test image number',
default=np.inf, type=int)
parser.add_argument('--h_len', dest='h_len',
help='image height',
default=600, type=int)
parser.add_argument('--w_len', dest='w_len',
help='image width',
default=600, type=int)
parser.add_argument('--h_overlap', dest='h_overlap',
help='height overlap',
default=150, type=int)
parser.add_argument('--w_overlap', dest='w_overlap',
help='width overlap',
default=150, type=int)
args = parser.parse_args()
return args
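# Multi-process tester: each GPU worker splits large DOTA images into overlapping crops, runs detection per crop (optionally multi-scale and flipped), and merges results with rotated NMS.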
class TestDOTA(object):
def __init__(self, cfgs):
self.cfgs = cfgs
self.args = parse_args()
label_map = LabelMap(cfgs)
self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()
def worker(self, gpu_id, images, det_net, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
if not restorer is None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for img_path in images:
# if 'P0016' not in img_path:
# continue
img = cv2.imread(img_path)
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
self.cfgs.IMG_SHORT_SIDE_LEN]
img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list
if imgH < self.args.h_len:
temp = np.zeros([self.args.h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = self.args.h_len
if imgW < self.args.w_len:
temp = np.zeros([imgH, self.args.w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = self.args.w_len
for hh in range(0, imgH, self.args.h_len - self.args.h_overlap):
if imgH - hh - 1 < self.args.h_len:
hh_ = imgH - self.args.h_len
else:
hh_ = hh
for ww in range(0, imgW, self.args.w_len - self.args.w_overlap):
if imgW - ww - 1 < self.args.w_len:
ww_ = imgW - self.args.w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + self.args.h_len), ww_:(ww_ + self.args.w_len), :]
for short_size in img_short_side_len_list:
max_len = self.cfgs.IMG_MAX_LENGTH
if self.args.h_len < self.args.w_len:
new_h, new_w = short_size, min(int(short_size * float(self.args.w_len) / self.args.h_len), max_len)
else:
new_h, new_w = min(int(short_size * float(self.args.h_len) / self.args.w_len), max_len), short_size
img_resize = cv2.resize(src_img, (new_w, new_h))
resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
# det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
if self.args.flip_img:
det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=1)[:, :, ::-1]}
)
if len(det_boxes_r_flip) > 0:
det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_flip)):
box_rotate = det_boxes_r_flip[ii]
box_rotate[0::2] = (src_w - box_rotate[0::2]) + ww_
box_rotate[1::2] = box_rotate[1::2] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_flip[ii])
score_res_rotate.append(det_scores_r_flip[ii])
det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=0)[:, :, ::-1]}
)
if len(det_boxes_r_flip) > 0:
det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)
for ii in range(len(det_boxes_r_flip)):
box_rotate = det_boxes_r_flip[ii]
box_rotate[0::2] = box_rotate[0::2] + ww_
box_rotate[1::2] = (src_h - box_rotate[1::2]) + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_flip[ii])
score_res_rotate.append(det_scores_r_flip[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}
for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
# try:
# inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
# scores=np.array(tmp_score_r),
# iou_threshold=threshold[self.label_name_map[sub_class]],
# max_output_size=5000)
#
# except:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
# Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[self.label_name_map[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(self, det_net, real_test_img_list, txt_name):
save_path = os.path.join('/data2/pd/sdc/shipdet/v1/works_dir/rodet/test_dota', self.cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(self.args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(self.args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=self.worker, args=(int(gpu_id), split_records, det_net, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
if self.args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.makedirs(os.path.join(save_path, 'dota_img_vis'))
draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
# detected_boxes = backward_convert(res['boxes'], with_label=False)
detected_indices = res['scores'] >= self.cfgs.VIS_SCORE
detected_scores = res['scores'][detected_indices]
detected_boxes = res['boxes'][detected_indices]
detected_categories = res['labels'][detected_indices]
drawer = DrawBox(self.cfgs)
final_detections = drawer.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=2,
is_csl=False,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
CLASS_DOTA = self.name_label_map.keys()
write_handle = {}
tools.makedirs(os.path.join(save_path, 'dota_res'))
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
for i, rbox in enumerate(res['boxes']):
command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
res['scores'][i],
rbox[0], rbox[1], rbox[2], rbox[3],
rbox[4], rbox[5], rbox[6], rbox[7],)
write_handle[self.label_name_map[res['labels'][i]]].write(command)
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class].close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def get_test_image(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
if not self.args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************' * 3)
print('Already tested imgs:', img_filter)
print('****************************' * 3)
fr.close()
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
' Note that, we only support img format of (.jpg, .png, and .tiff) '
if self.args.num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: self.args.num_imgs]
return real_test_img_list
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
kazoo/testing/harness.py
|
"""Kazoo testing harnesses"""
import atexit
import logging
import os
import uuid
import threading
import unittest
from kazoo.client import KazooClient
from kazoo.exceptions import NotEmptyError
from kazoo.protocol.states import (
KazooState
)
from kazoo.testing.common import ZookeeperCluster
from kazoo.protocol.connection import _SESSION_EXPIRED
log = logging.getLogger(__name__)
CLUSTER = None
def get_global_cluster():
global CLUSTER
if CLUSTER is None:
ZK_HOME = os.environ.get("ZOOKEEPER_PATH")
ZK_CLASSPATH = os.environ.get("ZOOKEEPER_CLASSPATH")
ZK_PORT_OFFSET = int(os.environ.get("ZOOKEEPER_PORT_OFFSET", 20000))
assert ZK_HOME or ZK_CLASSPATH, (
"Either ZOOKEEPER_PATH or ZOOKEEPER_CLASSPATH environment "
"variable must be defined.\n"
"For deb package installations this is /usr/share/java")
CLUSTER = ZookeeperCluster(
install_path=ZK_HOME,
classpath=ZK_CLASSPATH,
port_offset=ZK_PORT_OFFSET,
)
atexit.register(lambda cluster: cluster.terminate(), CLUSTER)
return CLUSTER
class KazooTestHarness(unittest.TestCase):
"""Harness for testing code that uses Kazoo
This object can be used directly or as a mixin. It supports starting
and stopping a complete ZooKeeper cluster locally and provides an
API for simulating errors and expiring sessions.
Example::
class MyTestCase(KazooTestHarness):
def setUp(self):
self.setup_zookeeper()
# additional test setup
def tearDown(self):
self.teardown_zookeeper()
def test_something(self):
something_that_needs_a_kazoo_client(self.client)
def test_something_else(self):
something_that_needs_zk_servers(self.servers)
"""
def __init__(self, *args, **kw):
super(KazooTestHarness, self).__init__(*args, **kw)
self.client = None
self._clients = []
@property
def cluster(self):
return get_global_cluster()
@property
def servers(self):
return ",".join([s.address for s in self.cluster])
def _get_nonchroot_client(self):
return KazooClient(self.servers)
def _get_client(self, **kwargs):
c = KazooClient(self.hosts, **kwargs)
try:
self._clients.append(c)
except AttributeError:
self._clients = [c]
return c
def expire_session(self, client_id=None):
"""Force ZK to expire a client session
:param client_id: id of client to expire. If unspecified, the id of
self.client will be used.
"""
client_id = client_id or self.client.client_id
lost = threading.Event()
safe = threading.Event()
def watch_loss(state):
if state == KazooState.LOST:
lost.set()
if lost.is_set() and state == KazooState.CONNECTED:
safe.set()
return True
self.client.add_listener(watch_loss)
self.client._call(_SESSION_EXPIRED, None)
lost.wait(5)
if not lost.isSet():
raise Exception("Failed to get notified of session loss")
# Wait for the reconnect now
safe.wait(15)
if not safe.isSet():
raise Exception("Failed to see client reconnect")
self.client.retry(self.client.get_async, '/')
def setup_zookeeper(self, **client_options):
"""Create a ZK cluster and chrooted :class:`KazooClient`
The cluster will only be created on the first invocation and won't be
fully torn down until exit.
"""
if not self.cluster[0].running:
self.cluster.start()
namespace = "/kazootests" + uuid.uuid4().hex
self.hosts = self.servers + namespace
if 'timeout' not in client_options:
client_options['timeout'] = 0.8
self.client = self._get_client(**client_options)
self.client.start()
self.client.ensure_path("/")
def teardown_zookeeper(self):
"""Clean up any ZNodes created during the test
"""
if not self.cluster[0].running:
self.cluster.start()
tries = 0
if self.client and self.client.connected:
while tries < 3:
try:
self.client.retry(self.client.delete, '/', recursive=True)
break
except NotEmptyError:
pass
tries += 1
self.client.stop()
self.client.close()
del self.client
else:
client = self._get_client()
client.start()
client.retry(client.delete, '/', recursive=True)
client.stop()
client.close()
del client
for client in self._clients:
client.stop()
del client
self._clients = None
class KazooTestCase(KazooTestHarness):
def setUp(self):
self.setup_zookeeper()
def tearDown(self):
self.teardown_zookeeper()
|
[] |
[] |
[
"ZOOKEEPER_PATH",
"ZOOKEEPER_PORT_OFFSET",
"ZOOKEEPER_CLASSPATH"
] |
[]
|
["ZOOKEEPER_PATH", "ZOOKEEPER_PORT_OFFSET", "ZOOKEEPER_CLASSPATH"]
|
python
| 3 | 0 | |
scripts/missing_taskinstance.py
|
import os
import re
import unicodedata
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "systori.settings")
import django
django.setup()
from systori.apps.company.models import Company
from systori.apps.task.models import *
from systori.apps.project.models import *
c = Company.objects.get(schema=input("Company Schema:")).activate()
p = Project.objects.get(id=input("Project ID: "))
for job in p.jobs.all():
for taskgroup in job.taskgroups.all():
for task in taskgroup.tasks.all():
if task.taskinstances.all():
for taskinstance in task.taskinstances.all():
print("found taskinstances: {}".format(taskinstance))
else:
print("missing taskinstance at {}:{}\n".format(task.id, task.name))
if input("Add missing taskinstance: ") == "yes":
TaskInstance.objects.create(task=task, selected=True)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/gits/github.go
|
package gits
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/jenkins-x/jx/v2/pkg/util/trace"
"github.com/pkg/errors"
"github.com/google/go-github/github"
"github.com/jenkins-x/jx/v2/pkg/auth"
"github.com/jenkins-x/jx/v2/pkg/log"
"github.com/jenkins-x/jx/v2/pkg/util"
"golang.org/x/oauth2"
)
const (
pageSize = 100
)
type GitHubProvider struct {
Username string
Client *github.Client
Context context.Context
Server auth.AuthServer
User auth.UserAuth
Git Gitter
}
func NewGitHubProvider(server *auth.AuthServer, user *auth.UserAuth, git Gitter) (GitProvider, error) {
ctx := context.Background()
provider := GitHubProvider{
Server: *server,
User: *user,
Context: ctx,
Username: user.Username,
Git: git,
}
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: user.ApiToken},
)
tc := oauth2.NewClient(ctx, ts)
traceGitHubAPI := os.Getenv("TRACE_GITHUB_API")
if traceGitHubAPI == "1" || traceGitHubAPI == "on" {
tc.Transport = &trace.Tracer{tc.Transport}
}
return newGitHubProviderFromOauthClient(tc, provider)
}
// NewAnonymousGitHubProvider returns a new GitHubProvider without any authentication
func NewAnonymousGitHubProvider(server *auth.AuthServer, git Gitter) (GitProvider, error) {
ctx := context.Background()
provider := GitHubProvider{
Server: *server,
User: auth.UserAuth{},
Context: ctx,
Git: git,
}
return newGitHubProviderFromOauthClient(nil, provider)
}
func newGitHubProviderFromOauthClient(tc *http.Client, provider GitHubProvider) (GitProvider, error) {
var err error
u := provider.Server.URL
if IsGitHubServerURL(u) {
provider.Client = github.NewClient(tc)
} else {
u = GitHubEnterpriseApiEndpointURL(u)
provider.Client, err = github.NewEnterpriseClient(u, u, tc)
}
return &provider, err
}
func GitHubEnterpriseApiEndpointURL(u string) string {
if IsGitHubServerURL(u) {
return u
}
// lets ensure we use the API endpoint to login
if strings.Index(u, "/api/") < 0 {
u = util.UrlJoin(u, "/api/v3/")
}
return u
}
// GetEnterpriseApiURL returns the github enterprise API URL or blank if this
// provider is for the https://github.com service
func (p *GitHubProvider) GetEnterpriseApiURL() string {
u := p.Server.URL
if IsGitHubServerURL(u) {
return ""
}
return GitHubEnterpriseApiEndpointURL(u)
}
func IsGitHubServerURL(u string) bool {
u = strings.TrimSuffix(u, "/")
return u == "" || u == "https://github.com" || u == "http://github.com"
}
func (p *GitHubProvider) ListOrganisations() ([]GitOrganisation, error) {
answer := []GitOrganisation{}
options := github.ListOptions{
Page: 0,
PerPage: pageSize,
}
for {
orgs, _, err := p.Client.Organizations.List(p.Context, "", &options)
if err != nil {
return answer, err
}
for _, org := range orgs {
name := org.Login
if name != nil {
o := GitOrganisation{
Login: *name,
}
answer = append(answer, o)
}
}
if len(orgs) < pageSize || len(orgs) == 0 {
break
}
options.Page += 1
}
return answer, nil
}
func (p *GitHubProvider) IsUserInOrganisation(user string, org string) (bool, error) {
membership, _, err := p.Client.Organizations.GetOrgMembership(p.Context, user, org)
if err != nil {
return false, err
}
if membership != nil {
return true, nil
}
return false, nil
}
func (p *GitHubProvider) ListRepositoriesForUser(user string) ([]*GitRepository, error) {
owner := user
answer := []*GitRepository{}
options := &github.RepositoryListOptions{
ListOptions: github.ListOptions{
Page: 0,
PerPage: pageSize,
},
}
for {
repos, _, err := p.Client.Repositories.List(p.Context, owner, options)
if err != nil {
options := &github.RepositoryListOptions{
ListOptions: github.ListOptions{
Page: 0,
PerPage: pageSize,
},
}
repos, _, err = p.Client.Repositories.List(p.Context, owner, options)
if err != nil {
return answer, err
}
}
for _, repo := range repos {
answer = append(answer, toGitHubRepo(asText(repo.Name), owner, repo))
}
if len(repos) < pageSize || len(repos) == 0 {
break
}
options.ListOptions.Page += 1
}
return answer, nil
}
// IsOwnerGitHubUser checks to see if the owner is the GitHub User
func IsOwnerGitHubUser(owner string, gitHubUser string) bool {
return owner == gitHubUser && gitHubUser != ""
}
func (p *GitHubProvider) ListRepositories(org string) ([]*GitRepository, error) {
owner := org
answer := []*GitRepository{}
options := &github.RepositoryListByOrgOptions{
ListOptions: github.ListOptions{
Page: 0,
PerPage: pageSize,
},
}
if IsOwnerGitHubUser(owner, p.Username) {
log.Logger().Infof("Owner of repo is same as username, using GitHub API for Users")
return p.ListRepositoriesForUser(p.Username)
}
for {
repos, _, err := p.Client.Repositories.ListByOrg(p.Context, owner, options)
if err != nil {
options := &github.RepositoryListOptions{
ListOptions: github.ListOptions{
Page: 0,
PerPage: pageSize,
},
}
repos, _, err = p.Client.Repositories.List(p.Context, org, options)
if err != nil {
return answer, err
}
}
for _, repo := range repos {
answer = append(answer, toGitHubRepo(asText(repo.Name), org, repo))
}
if len(repos) < pageSize || len(repos) == 0 {
break
}
options.ListOptions.Page += 1
}
return answer, nil
}
func (p *GitHubProvider) ListReleases(org string, name string) ([]*GitRelease, error) {
owner := org
if owner == "" {
owner = p.Username
}
answer := []*GitRelease{}
options := &github.ListOptions{
Page: 0,
PerPage: pageSize,
}
for {
repos, _, err := p.Client.Repositories.ListReleases(p.Context, owner, name, options)
if err != nil {
return answer, err
}
for _, repo := range repos {
answer = append(answer, toGitHubRelease(org, name, repo))
}
if len(repos) < pageSize || len(repos) == 0 {
break
}
options.Page += 1
}
return answer, nil
}
// GetRelease gets the release info for org, repo name and tag
func (p *GitHubProvider) GetRelease(org string, name string, tag string) (*GitRelease, error) {
owner := org
if owner == "" {
owner = p.Username
}
repo, _, err := p.Client.Repositories.GetReleaseByTag(p.Context, owner, name, tag)
if err != nil {
return nil, err
}
return toGitHubRelease(owner, name, repo), nil
}
func toGitHubRelease(org string, name string, release *github.RepositoryRelease) *GitRelease {
totalDownloadCount := 0
assets := make([]GitReleaseAsset, 0)
for _, asset := range release.Assets {
p := asset.DownloadCount
if p != nil {
totalDownloadCount = totalDownloadCount + *p
}
assets = append(assets, toGitHubAsset(asset))
}
return &GitRelease{
Name: asText(release.Name),
TagName: asText(release.TagName),
Body: asText(release.Body),
URL: asText(release.URL),
HTMLURL: asText(release.HTMLURL),
DownloadCount: totalDownloadCount,
Assets: &assets,
}
}
func toGitHubAsset(asset github.ReleaseAsset) GitReleaseAsset {
return GitReleaseAsset{
ID: util.DereferenceInt64(asset.ID),
Name: asText(asset.Name),
BrowserDownloadURL: asText(asset.BrowserDownloadURL),
ContentType: asText(asset.ContentType),
}
}
func (p *GitHubProvider) GetRepository(org string, name string) (*GitRepository, error) {
repo, _, err := p.Client.Repositories.Get(p.Context, org, name)
if err != nil {
return nil, fmt.Errorf("failed to get repository %s/%s due to: %s", org, name, err)
}
return toGitHubRepo(name, org, repo), nil
}
// CreateRepository creates a new repository under owner/name, optionally private
func (p *GitHubProvider) CreateRepository(owner string, name string, private bool) (*GitRepository, error) {
repoConfig := &github.Repository{
Name: github.String(name),
Private: github.Bool(private),
}
visibility := "public"
if private {
visibility = "private"
}
orgName := owner
isOrg := true
org, _, err := p.Client.Organizations.Get(p.Context, orgName)
if err != nil || org == nil {
isOrg = false
}
if !isOrg {
log.Logger().Debugf("repository owner for %s is the authenticated user %s, setting orgName to the empty string '%s'", name, p.Username, orgName)
orgName = ""
}
log.Logger().Debugf("creating %s repository %s with orgName '%s'", visibility, name, orgName)
repo, _, err := p.Client.Repositories.Create(p.Context, orgName, repoConfig)
if err != nil {
msg := fmt.Sprintf("failed to create %s repository %s/%s due to: %s", visibility, owner, name, err)
if strings.Contains(err.Error(), "Visibility can't be private") {
msg = msg + "\ntip: free GitHub organization accounts do not allow private repositories"
}
return nil, errors.New(msg)
}
// Sleep 5 seconds to ensure repository exists enough to be pushed to.
time.Sleep(5 * time.Second)
return toGitHubRepo(name, owner, repo), nil
}
func (p *GitHubProvider) DeleteRepository(org string, name string) error {
owner := org
if owner == "" {
owner = p.Username
}
_, err := p.Client.Repositories.Delete(p.Context, owner, name)
if err != nil {
return fmt.Errorf("failed to delete repository %s/%s due to: %s", owner, name, err)
}
return err
}
func toGitHubRepo(name string, org string, repo *github.Repository) *GitRepository {
var id int64
if repo.ID != nil {
id = *repo.ID
}
return &GitRepository{
ID: id,
Name: name,
AllowMergeCommit: util.DereferenceBool(repo.AllowMergeCommit),
CloneURL: asText(repo.CloneURL),
HTMLURL: asText(repo.HTMLURL),
SSHURL: asText(repo.SSHURL),
URL: util.DereferenceString(repo.URL),
Fork: util.DereferenceBool(repo.Fork),
Language: asText(repo.Language),
Stars: asInt(repo.StargazersCount),
Private: util.DereferenceBool(repo.Private),
Organisation: org,
HasIssues: util.DereferenceBool(repo.HasIssues),
OpenIssueCount: util.DereferenceInt(repo.OpenIssuesCount),
HasWiki: repo.GetHasWiki(),
HasProjects: repo.GetHasProjects(),
Archived: repo.GetArchived(),
}
}
func (p *GitHubProvider) ForkRepository(originalOrg string, name string, destinationOrg string) (*GitRepository, error) {
repoConfig := &github.RepositoryCreateForkOptions{}
if destinationOrg != "" {
repoConfig.Organization = destinationOrg
}
repo, _, err := p.Client.Repositories.CreateFork(p.Context, originalOrg, name, repoConfig)
if err != nil {
msg := ""
if destinationOrg != "" {
msg = fmt.Sprintf(" to %s", destinationOrg)
}
if destinationOrg == "" {
destinationOrg = p.Username
}
if strings.Contains(err.Error(), "try again later") {
log.Logger().Warnf("Waiting for the fork of %s/%s to appear...", destinationOrg, name)
// lets wait for the fork to occur...
start := time.Now()
deadline := start.Add(time.Minute)
for {
time.Sleep(5 * time.Second)
repo, _, err = p.Client.Repositories.Get(p.Context, destinationOrg, name)
if repo != nil && err == nil {
break
}
t := time.Now()
if t.After(deadline) {
return nil, fmt.Errorf("gave up waiting for Repository %s/%s to appear: %s", destinationOrg, name, err)
}
}
} else {
return nil, fmt.Errorf("failed to fork repository %s/%s%s due to: %s", originalOrg, name, msg, err)
}
}
// Sleep 5 seconds to ensure repository exists enough to be pushed to.
time.Sleep(5 * time.Second)
answer := &GitRepository{
Name: name,
AllowMergeCommit: util.DereferenceBool(repo.AllowMergeCommit),
CloneURL: asText(repo.CloneURL),
HTMLURL: asText(repo.HTMLURL),
URL: util.DereferenceString(repo.URL),
SSHURL: asText(repo.SSHURL),
Organisation: destinationOrg,
Fork: true,
}
return answer, nil
}
func (p *GitHubProvider) CreateWebHook(data *GitWebHookArguments) error {
owner := data.Owner
if owner == "" {
owner = p.Username
}
repo := data.Repo.Name
if repo == "" {
return fmt.Errorf("Missing property Repo")
}
webhookUrl := data.URL
if webhookUrl == "" {
return fmt.Errorf("Missing property URL")
}
hooks, _, err := p.Client.Repositories.ListHooks(p.Context, owner, repo, nil)
if err != nil {
log.Logger().Warnf("Querying webhooks on %s/%s: %s", owner, repo, err)
}
for _, hook := range hooks {
c := hook.Config["url"]
u, ok := c.(string)
if ok && u == webhookUrl {
s, ok := hook.Config["secret"]
if ok && s != data.Secret {
// lets remove this hook as its using an old secret
if hook.ID == nil {
return fmt.Errorf("webook at %s for %s/%s has no ID", asText(hook.URL), owner, repo)
}
id := *hook.ID
_, err = p.Client.Repositories.DeleteHook(p.Context, owner, repo, id)
if err != nil {
return errors.Wrapf(err, "failed to remove old webhook on %s/%s with ID %v with old secret", owner, repo, id)
}
} else {
log.Logger().Warnf("Already has a webhook registered for %s", webhookUrl)
return nil
}
}
}
// 0 makes insecure SSL not enabled
insecureSSL := "0"
if data.InsecureSSL {
// this is insecure and should only be used in test scenarios
insecureSSL = "1"
}
config := map[string]interface{}{
"url": webhookUrl,
"content_type": "json",
"insecure_ssl": insecureSSL,
}
if data.Secret != "" {
config["secret"] = data.Secret
}
hook := &github.Hook{
Name: github.String("web"),
Config: config,
Events: []string{"*"},
}
log.Logger().Infof("Creating GitHub webhook for %s/%s for url %s", util.ColorInfo(owner), util.ColorInfo(repo), util.ColorInfo(webhookUrl))
_, resp, err := p.Client.Repositories.CreateHook(p.Context, owner, repo, hook)
if err != nil {
if resp.StatusCode == 404 || resp.StatusCode == 403 {
return errors.Wrapf(err, "unable to create webhook for %s/%s - permission denied", owner, repo)
}
}
return err
}
func (p *GitHubProvider) ListWebHooks(owner string, repo string) ([]*GitWebHookArguments, error) {
webHooks := []*GitWebHookArguments{}
if owner == "" {
owner = p.Username
}
if repo == "" {
return webHooks, fmt.Errorf("Missing property Repo")
}
hooks, _, err := p.Client.Repositories.ListHooks(p.Context, owner, repo, nil)
if err != nil {
return webHooks, nil
}
for _, hook := range hooks {
c := hook.Config["url"]
s, ok := c.(string)
if ok {
webHook := &GitWebHookArguments{
ID: hook.GetID(),
Owner: owner,
Repo: nil,
URL: s,
}
webHooks = append(webHooks, webHook)
}
}
return webHooks, nil
}
func (p *GitHubProvider) UpdateWebHook(data *GitWebHookArguments) error {
owner := data.Owner
if owner == "" {
owner = p.Username
}
repo := data.Repo.Name
if repo == "" {
return fmt.Errorf("Missing property Repo")
}
webhookUrl := data.URL
if repo == "" {
return fmt.Errorf("Missing property URL")
}
hooks, _, err := p.Client.Repositories.ListHooks(p.Context, owner, repo, nil)
if err != nil {
log.Logger().Warnf("Querying webhooks on %s/%s: %s", owner, repo, err)
}
dataId := data.ID
if dataId == 0 {
for _, hook := range hooks {
c := hook.Config["url"]
s, ok := c.(string)
if ok && s == data.ExistingURL {
log.Logger().Warnf("Found existing webhook for url %s", data.ExistingURL)
dataId = hook.GetID()
}
}
}
if dataId != 0 {
config := map[string]interface{}{
"url": webhookUrl,
"content_type": "json",
}
if data.Secret != "" {
config["secret"] = data.Secret
}
hook := &github.Hook{
Name: github.String("web"),
Config: config,
Events: []string{"*"},
}
log.Logger().Infof("Updating GitHub webhook for %s/%s for url %s", util.ColorInfo(owner), util.ColorInfo(repo), util.ColorInfo(webhookUrl))
_, _, err = p.Client.Repositories.EditHook(p.Context, owner, repo, dataId, hook)
} else {
log.Logger().Warn("No webhooks found to update")
}
return err
}
func (p *GitHubProvider) CreatePullRequest(data *GitPullRequestArguments) (*GitPullRequest, error) {
owner := data.GitRepository.Organisation
repo := data.GitRepository.Name
title := data.Title
body := data.Body
head := data.Head
base := data.Base
config := &github.NewPullRequest{}
if title != "" {
config.Title = github.String(title)
}
if body != "" {
config.Body = github.String(body)
}
if head != "" {
config.Head = github.String(head)
}
if base != "" {
config.Base = github.String(base)
}
pr, resp, err := p.Client.PullRequests.Create(p.Context, owner, repo, config)
if err != nil {
if resp != nil && resp.Body != nil {
data, err2 := ioutil.ReadAll(resp.Body)
if err2 == nil && len(data) > 0 {
return nil, errors.Wrapf(err, "response: %s", string(data))
}
}
return nil, err
}
return &GitPullRequest{
URL: notNullString(pr.HTMLURL),
Owner: owner,
Repo: repo,
Number: pr.Number,
}, nil
}
// UpdatePullRequest updates pull request with number using data
func (p *GitHubProvider) UpdatePullRequest(data *GitPullRequestArguments, number int) (*GitPullRequest, error) {
owner := data.GitRepository.Organisation
repo := data.GitRepository.Name
title := data.Title
body := data.Body
head := data.Head
base := data.Base
config := &github.PullRequest{
Head: &github.PullRequestBranch{},
Base: &github.PullRequestBranch{},
}
if title != "" {
config.Title = github.String(title)
}
if body != "" {
config.Body = github.String(body)
}
if head != "" {
config.Head.Ref = github.String(head)
}
if base != "" {
config.Base.Ref = github.String(base)
}
pr, resp, err := p.Client.PullRequests.Edit(p.Context, owner, repo, number, config)
if err != nil {
if resp != nil && resp.Body != nil {
data, err2 := ioutil.ReadAll(resp.Body)
if err2 == nil && len(data) > 0 {
return nil, errors.Wrapf(err, "response: %s", string(data))
}
}
return nil, err
}
return &GitPullRequest{
URL: notNullString(pr.HTMLURL),
Owner: owner,
Repo: repo,
Number: pr.Number,
}, nil
}
func (p *GitHubProvider) UpdatePullRequestStatus(pr *GitPullRequest) error {
if pr.Number == nil {
return fmt.Errorf("Missing Number for GitPullRequest %#v", pr)
}
n := *pr.Number
result, _, err := p.Client.PullRequests.Get(p.Context, pr.Owner, pr.Repo, n)
if err != nil {
return err
}
p.updatePullRequest(pr, result)
return nil
}
// AddLabelsToIssue adds labels to an issue
func (p *GitHubProvider) AddLabelsToIssue(owner string, repo string, number int, labels []string) error {
_, result, err := p.Client.Issues.AddLabelsToIssue(p.Context, owner, repo, number, labels)
if err != nil {
return err
}
if result.StatusCode > 201 {
return errors.Wrapf(err, "failed to add labels to issue on %s/%s with ID %v", owner, repo, number)
}
return nil
}
// updatePullRequest updates the pr with the data from GitHub
func (p *GitHubProvider) updatePullRequest(pr *GitPullRequest, source *github.PullRequest) {
head := source.Head
if head != nil {
pr.LastCommitSha = notNullString(head.SHA)
} else {
pr.LastCommitSha = ""
}
if pr.Author == nil && source.User != nil && source.User.Login != nil {
pr.Author = &GitUser{
Login: *source.User.Login,
AvatarURL: *source.User.AvatarURL,
URL: *source.User.HTMLURL,
}
}
pr.Assignees = make([]*GitUser, 0)
for _, u := range source.Assignees {
if u != nil {
pr.Assignees = append(pr.Assignees, &GitUser{
Login: *u.Login,
})
}
}
pr.RequestedReviewers = make([]*GitUser, 0)
for _, u := range source.RequestedReviewers {
if u != nil {
pr.RequestedReviewers = append(pr.RequestedReviewers, &GitUser{
Login: *u.Login,
})
}
}
pr.Labels = make([]*Label, 0)
for _, l := range source.Labels {
if l != nil {
pr.Labels = append(pr.Labels, &Label{
Name: l.Name,
URL: l.URL,
ID: l.ID,
Color: l.Color,
Default: l.Default,
Description: l.Description,
})
}
}
if source.Mergeable != nil {
pr.Mergeable = source.Mergeable
}
pr.MergeCommitSHA = source.MergeCommitSHA
if source.Merged != nil {
pr.Merged = source.Merged
}
if source.ClosedAt != nil {
pr.ClosedAt = source.ClosedAt
}
if source.MergedAt != nil {
pr.MergedAt = source.MergedAt
}
if source.State != nil {
pr.State = source.State
}
if source.Head != nil {
pr.HeadRef = source.Head.Ref
if source.Head.Repo != nil {
pr.HeadOwner = source.Head.Repo.Owner.Login
}
}
if source.StatusesURL != nil {
pr.StatusesURL = source.StatusesURL
}
if source.IssueURL != nil {
pr.IssueURL = source.IssueURL
}
if source.DiffURL != nil {
pr.IssueURL = source.DiffURL
}
if source.Title != nil {
pr.Title = *source.Title
}
if source.Body != nil {
pr.Body = *source.Body
}
if source.HTMLURL != nil {
pr.URL = *source.HTMLURL
}
if source.UpdatedAt != nil {
pr.UpdatedAt = source.UpdatedAt
}
}
func (p *GitHubProvider) toPullRequest(owner string, repo string, pr *github.PullRequest) *GitPullRequest {
answer := &GitPullRequest{
URL: asText(pr.URL),
Owner: owner,
Repo: repo,
Number: pr.Number,
}
p.updatePullRequest(answer, pr)
return answer
}
func (p *GitHubProvider) GetPullRequest(owner string, repo *GitRepository, number int) (*GitPullRequest, error) {
pr := &GitPullRequest{
Owner: owner,
Repo: repo.Name,
Number: &number,
}
err := p.UpdatePullRequestStatus(pr)
return pr, err
}
// ListOpenPullRequests lists the open pull requests
func (p *GitHubProvider) ListOpenPullRequests(owner string, repo string) ([]*GitPullRequest, error) {
opt := &github.PullRequestListOptions{
State: "open",
ListOptions: github.ListOptions{
Page: 0,
PerPage: pageSize,
},
}
answer := []*GitPullRequest{}
for {
prs, _, err := p.Client.PullRequests.List(p.Context, owner, repo, opt)
if err != nil {
return answer, err
}
for _, pr := range prs {
answer = append(answer, p.toPullRequest(owner, repo, pr))
}
if len(prs) < pageSize || len(prs) == 0 {
break
}
opt.Page++
}
return answer, nil
}
func extractRepositoryCommitAuthor(rc *github.RepositoryCommit) (gu *GitUser) {
gu = &GitUser{}
if rc.Commit.Author != nil {
gu.Email = rc.Commit.Author.GetEmail()
gu.Name = rc.Commit.Author.GetName()
if rc.Author != nil {
gu.Login = rc.Author.GetLogin()
gu.URL = rc.Author.GetURL()
gu.AvatarURL = rc.Author.GetAvatarURL()
}
}
return
}
func (p *GitHubProvider) asGitHubCommit(commit *github.RepositoryCommit) GitCommit {
message := ""
if commit.Commit != nil {
message = util.DereferenceString(commit.Commit.Message)
} else {
log.Logger().Warnf("No Commit object for for commit: %s", commit.GetSHA())
}
author := extractRepositoryCommitAuthor(commit)
return GitCommit{
Message: message,
URL: commit.GetURL(),
SHA: commit.GetSHA(),
Author: author,
}
}
func (p *GitHubProvider) GetPullRequestCommits(owner string, repository *GitRepository, number int) ([]*GitCommit, error) {
repo := repository.Name
commits, _, err := p.Client.PullRequests.ListCommits(p.Context, owner, repo, number, nil)
if err != nil {
return nil, err
}
answer := []*GitCommit{}
for _, commit := range commits {
summary := p.asGitHubCommit(commit)
answer = append(answer, &summary)
}
return answer, nil
}
func (p *GitHubProvider) MergePullRequest(pr *GitPullRequest, message string) error {
if pr.Number == nil {
return fmt.Errorf("Missing Number for GitPullRequest %#v", pr)
}
n := *pr.Number
ref := pr.LastCommitSha
options := &github.PullRequestOptions{
SHA: ref,
}
result, _, err := p.Client.PullRequests.Merge(p.Context, pr.Owner, pr.Repo, n, message, options)
if err != nil {
return err
}
if result.Merged == nil || *result.Merged == false {
return fmt.Errorf("Failed to merge PR %s for ref %s as result did not return merged", pr.URL, ref)
}
return nil
}
func (p *GitHubProvider) AddPRComment(pr *GitPullRequest, comment string) error {
if pr.Number == nil {
return fmt.Errorf("Missing Number for GitPullRequest %#v", pr)
}
n := *pr.Number
prComment := &github.IssueComment{
Body: &comment,
}
_, _, err := p.Client.Issues.CreateComment(p.Context, pr.Owner, pr.Repo, n, prComment)
if err != nil {
return err
}
return nil
}
func (p *GitHubProvider) CreateIssueComment(owner string, repo string, number int, comment string) error {
issueComment := &github.IssueComment{
Body: &comment,
}
_, _, err := p.Client.Issues.CreateComment(p.Context, owner, repo, number, issueComment)
if err != nil {
return err
}
return nil
}
func (p *GitHubProvider) PullRequestLastCommitStatus(pr *GitPullRequest) (string, error) {
ref := pr.LastCommitSha
if ref == "" {
return "", fmt.Errorf("Missing String for LastCommitSha %#v", pr)
}
results, _, err := p.Client.Repositories.ListStatuses(p.Context, pr.Owner, pr.Repo, ref, nil)
if err != nil {
return "", err
}
for _, result := range results {
if result.State != nil && notNullString(result.Context) != "tide" {
return *result.State, nil
}
}
return "", fmt.Errorf("Could not find a status for repository %s/%s with ref %s", pr.Owner, pr.Repo, ref)
}
func (p *GitHubProvider) ListCommitStatus(org string, repo string, sha string) ([]*GitRepoStatus, error) {
answer := []*GitRepoStatus{}
if sha == "" {
return answer, fmt.Errorf("Missing String for sha %s/%s", org, repo)
}
results, _, err := p.Client.Repositories.ListStatuses(p.Context, org, repo, sha, nil)
if err != nil {
return answer, fmt.Errorf("Could not find a status for repository %s/%s with ref %s", org, repo, sha)
}
for _, result := range results {
status := &GitRepoStatus{
ID: strconv.FormatInt(notNullInt64(result.ID), 10),
Context: notNullString(result.Context),
URL: notNullString(result.URL),
TargetURL: notNullString(result.TargetURL),
State: notNullString(result.State),
Description: notNullString(result.Description),
}
answer = append(answer, status)
}
return answer, nil
}
func (p *GitHubProvider) UpdateCommitStatus(org string, repo string, sha string, status *GitRepoStatus) (*GitRepoStatus, error) {
id64 := int64(0)
if status.ID != "" {
id, err := strconv.Atoi(status.ID)
if err != nil {
return &GitRepoStatus{}, err
}
id64 = int64(id)
}
repoStatus := github.RepoStatus{
Context: &status.Context,
State: &status.State,
Description: &status.Description,
TargetURL: &status.TargetURL,
URL: &status.URL,
ID: &id64,
}
result, _, err := p.Client.Repositories.CreateStatus(p.Context, org, repo, sha, &repoStatus)
if err != nil {
return &GitRepoStatus{}, err
}
return &GitRepoStatus{
ID: strconv.FormatInt(notNullInt64(result.ID), 10),
Context: notNullString(result.Context),
URL: notNullString(result.URL),
TargetURL: notNullString(result.TargetURL),
State: notNullString(result.State),
Description: notNullString(result.Description),
}, nil
}
func (p *GitHubProvider) GetContent(org string, name string, path string, ref string) (*GitFileContent, error) {
fileContent, _, _, err := p.Client.Repositories.GetContents(p.Context, org, name, path, &github.RepositoryContentGetOptions{Ref: ref})
if err != nil {
return nil, err
}
if fileContent != nil {
return &GitFileContent{
Name: notNullString(fileContent.Name),
Url: notNullString(fileContent.URL),
Path: notNullString(fileContent.Path),
Type: notNullString(fileContent.Type),
Content: notNullString(fileContent.Content),
DownloadUrl: notNullString(fileContent.DownloadURL),
Encoding: notNullString(fileContent.Encoding),
GitUrl: notNullString(fileContent.GitURL),
HtmlUrl: notNullString(fileContent.HTMLURL),
Sha: notNullString(fileContent.SHA),
Size: notNullInt(fileContent.Size),
}, nil
} else {
return nil, fmt.Errorf("Directory Content not yet supported")
}
}
func notNullInt64(n *int64) int64 {
if n != nil {
return *n
}
return 0
}
func notNullInt(n *int) int {
if n != nil {
return *n
}
return 0
}
func notNullString(tp *string) string {
if tp == nil {
return ""
}
return *tp
}
func (p *GitHubProvider) RenameRepository(org string, name string, newName string) (*GitRepository, error) {
if org == "" {
org = p.Username
}
config := &github.Repository{
Name: github.String(newName),
}
repo, _, err := p.Client.Repositories.Edit(p.Context, org, name, config)
if err != nil {
return nil, fmt.Errorf("Failed to edit repository %s/%s due to: %s", org, name, err)
}
answer := &GitRepository{
Name: name,
AllowMergeCommit: util.DereferenceBool(repo.AllowMergeCommit),
CloneURL: asText(repo.CloneURL),
HTMLURL: asText(repo.HTMLURL),
SSHURL: asText(repo.SSHURL),
Organisation: org,
}
return answer, nil
}
func (p *GitHubProvider) ValidateRepositoryName(org string, name string) error {
_, r, err := p.Client.Repositories.Get(p.Context, org, name)
if err == nil {
return fmt.Errorf("Repository %s already exists", p.Git.RepoName(org, name))
}
if r != nil && r.StatusCode == 404 {
return nil
}
return err
}
func (p *GitHubProvider) UpdateRelease(owner string, repo string, tag string, releaseInfo *GitRelease) error {
release := &github.RepositoryRelease{}
rel, r, err := p.Client.Repositories.GetReleaseByTag(p.Context, owner, repo, tag)
if r != nil && r.StatusCode == 404 && !strings.HasPrefix(tag, "v") {
// sometimes we prepend a v for example when using gh-release
// so lets make sure we don't create a double release
vtag := "v" + tag
rel2, r2, err2 := p.Client.Repositories.GetReleaseByTag(p.Context, owner, repo, vtag)
if r2.StatusCode != 404 {
rel = rel2
r = r2
err = err2
tag = vtag
}
}
if r != nil && err == nil {
release = rel
}
// lets populate the release
if release.Name == nil && releaseInfo.Name != "" {
release.Name = &releaseInfo.Name
}
if release.TagName == nil && releaseInfo.TagName != "" {
release.TagName = &releaseInfo.TagName
}
if (release.Body == nil || *release.Body == "") && releaseInfo.Body != "" {
release.Body = &releaseInfo.Body
}
if r != nil && r.StatusCode == 404 {
log.Logger().Warnf("No release found for %s/%s and tag %s so creating a new release", owner, repo, tag)
rel, _, err := p.Client.Repositories.CreateRelease(p.Context, owner, repo, release)
if rel != nil {
releaseInfo.ID = util.DereferenceInt64(rel.ID)
}
return err
}
id := release.ID
if id == nil {
return fmt.Errorf("the release for %s/%s tag %s has no ID", owner, repo, tag)
}
releaseInfo.ID = util.DereferenceInt64(id)
r2, _, err := p.Client.Repositories.EditRelease(p.Context, owner, repo, *id, release)
if r != nil {
releaseInfo.URL = asText(r2.URL)
releaseInfo.HTMLURL = asText(r2.HTMLURL)
}
return err
}
// UpdateReleaseStatus updates the state (release/prerelease) of a release
func (p *GitHubProvider) UpdateReleaseStatus(owner string, repo string, tag string, releaseInfo *GitRelease) error {
release := &github.RepositoryRelease{}
rel, r, err := p.Client.Repositories.GetReleaseByTag(p.Context, owner, repo, tag)
if r != nil && r.StatusCode == 404 && !strings.HasPrefix(tag, "v") {
// sometimes we prepend a v for example when using gh-release
// so lets make sure we don't create a double release
vtag := "v" + tag
rel2, r2, err2 := p.Client.Repositories.GetReleaseByTag(p.Context, owner, repo, vtag)
if r2.StatusCode != 405 {
rel = rel2
r = r2
err = err2
tag = vtag
}
}
if r != nil && err == nil {
release = rel
}
if r != nil && r.StatusCode == 404 {
log.Logger().Warnf("No release found for %s/%s and tag %s", owner, repo, tag)
return err
}
if release.Prerelease != &releaseInfo.PreRelease {
release.Prerelease = &releaseInfo.PreRelease
}
id := release.ID
if id == nil {
return fmt.Errorf("the release for %s/%s tag %s has no ID!", owner, repo, tag)
}
_, _, err = p.Client.Repositories.EditRelease(p.Context, owner, repo, *id, release)
return err
}
func (p *GitHubProvider) GetIssue(org string, name string, number int) (*GitIssue, error) {
i, r, err := p.Client.Issues.Get(p.Context, org, name, number)
if r != nil && r.StatusCode == 404 {
return nil, nil
}
if err != nil {
return nil, err
}
return p.fromGithubIssue(org, name, number, i)
}
func (p *GitHubProvider) SearchIssues(org string, name string, filter string) ([]*GitIssue, error) {
opts := &github.IssueListByRepoOptions{
State: filter,
}
return p.searchIssuesWithOptions(org, name, opts)
}
func (p *GitHubProvider) SearchIssuesClosedSince(org string, name string, t time.Time) ([]*GitIssue, error) {
opts := &github.IssueListByRepoOptions{
State: "closed",
}
issues, err := p.searchIssuesWithOptions(org, name, opts)
if err != nil {
return issues, err
}
issues = FilterIssuesClosedSince(issues, t)
return issues, nil
}
func (p *GitHubProvider) searchIssuesWithOptions(org string, name string, opts *github.IssueListByRepoOptions) ([]*GitIssue, error) {
opts.Page = 0
opts.PerPage = pageSize
answer := []*GitIssue{}
for {
issues, r, err := p.Client.Issues.ListByRepo(p.Context, org, name, opts)
if r != nil && r.StatusCode == 404 {
return answer, nil
}
if err != nil {
return answer, err
}
for _, issue := range issues {
if issue.Number != nil && !issue.IsPullRequest() {
n := *issue.Number
i, err := p.fromGithubIssue(org, name, n, issue)
if err != nil {
return answer, err
}
// TODO apply the filter?
answer = append(answer, i)
}
}
if len(issues) < pageSize || len(issues) == 0 {
break
}
opts.ListOptions.Page += 1
}
return answer, nil
}
func (p *GitHubProvider) CreateIssue(owner string, repo string, issue *GitIssue) (*GitIssue, error) {
labels := []string{}
for _, label := range issue.Labels {
name := label.Name
if name != "" {
labels = append(labels, name)
}
}
config := &github.IssueRequest{
Title: &issue.Title,
Body: &issue.Body,
Labels: &labels,
}
i, _, err := p.Client.Issues.Create(p.Context, owner, repo, config)
if err != nil {
return nil, err
}
number := 0
if i.Number != nil {
number = *i.Number
}
return p.fromGithubIssue(owner, repo, number, i)
}
func (p *GitHubProvider) fromGithubIssue(org string, name string, number int, i *github.Issue) (*GitIssue, error) {
isPull := i.IsPullRequest()
url := p.IssueURL(org, name, number, isPull)
labels := []GitLabel{}
for _, label := range i.Labels {
labels = append(labels, toGitHubLabel(&label))
}
assignees := []GitUser{}
for _, assignee := range i.Assignees {
assignees = append(assignees, *toGitHubUser(assignee))
}
return &GitIssue{
Number: &number,
URL: url,
State: i.State,
Title: asText(i.Title),
Body: asText(i.Body),
IsPullRequest: isPull,
Labels: labels,
User: toGitHubUser(i.User),
CreatedAt: i.CreatedAt,
UpdatedAt: i.UpdatedAt,
ClosedAt: i.ClosedAt,
ClosedBy: toGitHubUser(i.ClosedBy),
Assignees: assignees,
}, nil
}
func (p *GitHubProvider) IssueURL(org string, name string, number int, isPull bool) string {
serverPrefix := p.Server.URL
if !strings.HasPrefix(serverPrefix, "https://") {
serverPrefix = "https://" + serverPrefix
}
path := "issues"
if isPull {
path = "pull"
}
url := util.UrlJoin(serverPrefix, org, name, path, strconv.Itoa(number))
return url
}
func toGitHubUser(user *github.User) *GitUser {
if user == nil {
return nil
}
return &GitUser{
Login: asText(user.Login),
Name: asText(user.Name),
Email: asText(user.Email),
AvatarURL: asText(user.AvatarURL),
}
}
func toGitHubLabel(label *github.Label) GitLabel {
return GitLabel{
Name: asText(label.Name),
Color: asText(label.Color),
URL: asText(label.URL),
}
}
func (p *GitHubProvider) HasIssues() bool {
return true
}
func (p *GitHubProvider) IsGitHub() bool {
return true
}
func (p *GitHubProvider) IsGitea() bool {
return false
}
func (p *GitHubProvider) IsBitbucketCloud() bool {
return false
}
func (p *GitHubProvider) IsBitbucketServer() bool {
return false
}
func (p *GitHubProvider) IsGerrit() bool {
return false
}
func (p *GitHubProvider) Kind() string {
return KindGitHub
}
func (p *GitHubProvider) JenkinsWebHookPath(gitURL string, secret string) string {
return "/github-webhook/"
}
func GitHubAccessTokenURL(url string) string {
if strings.Index(url, "://") < 0 {
url = "https://" + url
}
return util.UrlJoin(url, "/settings/tokens/new?scopes=repo,read:user,read:org,user:email,write:repo_hook,delete_repo")
}
func (p *GitHubProvider) Label() string {
return p.Server.Label()
}
func (p *GitHubProvider) ServerURL() string {
return p.Server.URL
}
func (p *GitHubProvider) BranchArchiveURL(org string, name string, branch string) string {
return util.UrlJoin("https://codeload.github.com", org, name, "zip", branch)
}
func (p *GitHubProvider) CurrentUsername() string {
return p.Username
}
func (p *GitHubProvider) UserAuth() auth.UserAuth {
return p.User
}
func (p *GitHubProvider) UserInfo(username string) *GitUser {
user, _, err := p.Client.Users.Get(p.Context, username)
if user == nil || err != nil {
log.Logger().Errorf("Unable to fetch user info for %s", username)
return nil
}
return &GitUser{
Login: username,
Name: user.GetName(),
AvatarURL: user.GetAvatarURL(),
URL: user.GetHTMLURL(),
Email: user.GetEmail(),
}
}
func (p *GitHubProvider) AddCollaborator(user string, organisation string, repo string) error {
log.Logger().Infof("Automatically adding the pipeline user: %v as a collaborator.", user)
_, err := p.Client.Repositories.AddCollaborator(p.Context, organisation, repo, user, &github.RepositoryAddCollaboratorOptions{})
if err != nil {
return err
}
return nil
}
func (p *GitHubProvider) ListInvitations() ([]*github.RepositoryInvitation, *github.Response, error) {
return p.Client.Users.ListInvitations(p.Context, &github.ListOptions{})
}
func (p *GitHubProvider) AcceptInvitation(ID int64) (*github.Response, error) {
log.Logger().Infof("Automatically accepted invitation: %v for the pipeline user.", ID)
return p.Client.Users.AcceptInvitation(p.Context, ID)
}
// ShouldForkForPullRequest returns true if we should create a personal fork of this repository
// before creating a pull request
func (p *GitHubProvider) ShouldForkForPullRequest(originalOwner string, repoName string, username string) bool {
if strings.HasSuffix(username, "[bot]") || originalOwner == username {
return false
}
// lets check if the repo is private as that disables forking on github
repo, err := p.GetRepository(originalOwner, repoName)
if err != nil {
return false
}
if repo.Private {
return false
}
return true
}
func asInt(i *int) int {
if i != nil {
return *i
}
return 0
}
func asText(text *string) string {
if text != nil {
return *text
}
return ""
}
func (p *GitHubProvider) ListCommits(owner, repo string, opt *ListCommitsArguments) ([]*GitCommit, error) {
githubOpt := &github.CommitsListOptions{
SHA: opt.SHA,
Path: opt.Path,
ListOptions: github.ListOptions{
Page: opt.Page,
PerPage: opt.PerPage,
},
}
githubCommits, _, err := p.Client.Repositories.ListCommits(p.Context, owner, repo, githubOpt)
if err != nil {
log.Logger().Errorf("%s", err)
return nil, fmt.Errorf("Could not find commits for repository %s/%s", owner, repo)
}
var commits []*GitCommit
for _, commit := range githubCommits {
if commit.Commit != nil {
author := extractRepositoryCommitAuthor(commit)
commits = append(commits, &GitCommit{
SHA: asText(commit.SHA),
Message: asText(commit.Commit.Message),
URL: asText(commit.Commit.URL),
Author: author,
})
}
}
return commits, nil
}
// GetLatestRelease fetches the latest release from the git provider for org and name
func (p *GitHubProvider) GetLatestRelease(org string, name string) (*GitRelease, error) {
repoRelease, _, err := p.Client.Repositories.GetLatestRelease(p.Context, org, name)
if err != nil {
return nil, errors.Wrapf(err, "getting latest release for %s/%s", org, name)
}
return toGitHubRelease(org, name, repoRelease), nil
}
// UploadReleaseAsset will upload an asset to org/repo to a release with id, giving it a name, it will return the release asset from the git provider
func (p *GitHubProvider) UploadReleaseAsset(org string, repo string, id int64, name string, asset *os.File) (*GitReleaseAsset, error) {
answer, _, err := p.Client.Repositories.UploadReleaseAsset(p.Context, org, repo, id, &github.UploadOptions{
Name: name,
}, asset)
if err != nil {
return nil, errors.Wrapf(err, "uploading asset %s to release %d in %s/%s", asset.Name(), id, org, repo)
}
if answer != nil {
a := toGitHubAsset(*answer)
return &a, nil
}
return nil, nil
}
// GetBranch returns the branch information for an owner/repo, including the commit at the tip
func (p *GitHubProvider) GetBranch(owner string, repo string, branch string) (*GitBranch, error) {
b, _, err := p.Client.Repositories.GetBranch(p.Context, owner, repo, branch)
if err != nil {
return nil, errors.Wrapf(err, "getting branch %s on %s/%s", branch, owner, repo)
}
commit := p.asGitHubCommit(b.Commit)
answer := GitBranch{
Name: util.DereferenceString(b.Name),
Protected: util.DereferenceBool(b.Protected),
Commit: &commit,
}
return &answer, nil
}
// GetProjects returns all the git projects in owner/repo
func (p *GitHubProvider) GetProjects(owner string, repo string) ([]GitProject, error) {
answer := make([]GitProject, 0)
projects, _, err := p.Client.Repositories.ListProjects(p.Context, owner, repo, &github.ProjectListOptions{State: "open"})
if err != nil {
return nil, errors.Wrapf(err, "listing projects for %s/%s", owner, repo)
}
for _, project := range projects {
answer = append(answer, p.toProject(project, ProjectOpen))
}
projects, _, err = p.Client.Repositories.ListProjects(p.Context, owner, repo, &github.ProjectListOptions{State: "closed"})
if err != nil {
return nil, errors.Wrapf(err, "listing projects for %s/%s", owner, repo)
}
for _, project := range projects {
answer = append(answer, p.toProject(project, ProjectClosed))
}
return answer, nil
}
func (p *GitHubProvider) toProject(project *github.Project, state string) GitProject {
return GitProject{
Name: project.GetName(),
Description: project.GetBody(),
Number: project.GetNumber(),
State: state,
}
}
//ConfigureFeatures sets specific features as enabled or disabled for owner/repo
func (p *GitHubProvider) ConfigureFeatures(owner string, repo string, issues *bool, projects *bool, wikis *bool) (*GitRepository, error) {
r, _, err := p.Client.Repositories.Get(p.Context, owner, repo)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get repository %s/%s", owner, repo)
}
if projects != nil {
r.HasProjects = projects
}
if wikis != nil {
r.HasWiki = wikis
}
if issues != nil {
r.HasIssues = issues
}
r, _, err = p.Client.Repositories.Edit(p.Context, owner, repo, r)
if err != nil {
return nil, errors.Wrapf(err, "Failed to edit repository %s/%s", owner, repo)
}
return toGitHubRepo(repo, owner, r), nil
}
// IsWikiEnabled returns true if a wiki is enabled for owner/repo
func (p *GitHubProvider) IsWikiEnabled(owner string, repo string) (bool, error) {
gitURL := fmt.Sprintf("%s/%s/%s.wiki.git", p.Server.URL, owner, repo)
dir, err := ioutil.TempDir("", "")
defer func() {
err := os.RemoveAll(dir)
if err != nil {
log.Logger().Warnf("unable to delete temp dir %s", dir)
}
}()
if err != nil {
return false, errors.Wrapf(err, "creating temp dir")
}
err = p.Git.Clone(gitURL, dir)
if err != nil {
if IsRepositoryNotExportedError(err) {
return false, nil
}
return false, errors.Wrapf(err, "cloning %s", gitURL)
}
return true, nil
}
|
[
"\"TRACE_GITHUB_API\""
] |
[] |
[
"TRACE_GITHUB_API"
] |
[]
|
["TRACE_GITHUB_API"]
|
go
| 1 | 0 |